/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

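/*
 * Host NQN bookkeeping: nvmf_host structures are refcounted and shared by
 * all controllers that were created with the same host NQN.
 */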
static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
{
	struct nvmf_host *host;

	list_for_each_entry(host, &nvmf_hosts, list) {
		if (!strcmp(host->nqn, hostnqn))
			return host;
	}

	return NULL;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);
	host = __nvmf_host_find(hostnqn);
	if (host) {
		kref_get(&host->ref);
		goto out_unlock;
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto out_unlock;

	kref_init(&host->ref);
	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_gen(&host->id);
	snprintf(host->nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}

/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance from which we got the address
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += snprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += snprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	len += snprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);

/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);

/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);

/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);

/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print
 *			      out function for connect() errors.
 *
 * @ctrl: the specific /dev/nvmeX device that had the error.
 *
 * @errval: Error code to be decoded in a more human-friendly
 *	    printout.
 *
 * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
 *
 * @cmd: This is the SQE portion of a submission capsule.
 *
 * @data: This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & (~NVME_SC_DNR);

	switch (err_sctype) {

	case (NVME_SC_CONNECT_INVALID_PARAM):
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;

	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;

	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;

	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d",
			cmd->connect.recfmt);
		break;

	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	} /* switch (err_sctype) */
}

/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via an NVMe Fabrics "Connect" command.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 *
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd;
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

	/*
	 * Set keep-alive timeout in seconds granularity (ms * 1000)
	 * and add a grace period for controller kato enforcement
	 */
	cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(0xffff);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);

/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 * @poll:	Whether to poll for completion of the connect command.
 *
 * This function issues a fabrics-protocol connection
 * of an NVMe I/O queue (via an NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
	struct nvme_command cmd;
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);
	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(ctrl->cntlid);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), 0, qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
	}
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);

bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);

/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);

/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);

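/*
 * Look up the registered transport matching the "transport=" option.
 * The caller must hold nvmf_transports_rwsem.
 */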
static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}

/*
 * For something we're not in a state to send to the device, the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;

	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_start_request(rq);
	nvme_complete_rq(rq);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);

bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * If we are in some state of setup or teardown only allow
	 * internally generated commands.
	 */
	if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
		return false;

	/*
	 * Only allow commands on a live queue, except for the connect command,
	 * which is required to set the queue live in the appropriate states.
	 */
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		if (req->cmd->common.opcode == nvme_fabrics_command &&
		    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
			return true;
		break;
	default:
		break;
	case NVME_CTRL_DEAD:
		return false;
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvmf_check_ready);

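/*
 * Option strings accepted in a controller creation request, mapped to the
 * corresponding NVMF_OPT_* flags.
 */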
static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_ERR,			NULL			}
};

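/*
 * Parse the comma-separated option string written to /dev/nvme-fabrics into
 * @opts, applying defaults and validating the individual values.
 */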
static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
	uuid_t hostid;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = NVME_DEFAULT_KATO;
	opts->duplicate_connect = false;
	opts->hdr_digest = false;
	opts->data_digest = false;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	uuid_gen(&hostid);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
					num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;

			if (opts->discovery_nqn && opts->kato) {
				pr_err("Discovery controllers cannot accept KATO != 0\n");
				ret = -EINVAL;
				goto out;
			}

			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			nvmf_host_put(opts->host);
			opts->host = nvmf_host_add(p);
			kfree(p);
			if (!opts->host) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->kato = 0;
		opts->nr_io_queues = 0;
		opts->duplicate_connect = true;
	}
	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);

	if (!opts->host) {
		kref_get(&nvmf_default_host->ref);
		opts->host = nvmf_default_host;
	}

	uuid_copy(&opts->host->id, &hostid);

out:
	kfree(options);
	return ret;
}

static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address is rough. In most cases, none is specified
	 * and the host port is selected by the stack.
	 *
	 * Assume no match if:
	 * - local address is specified and address is not the same
	 * - local address is not specified but remote is, or vice versa
	 *   (admin using specific host_traddr when it matters).
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT | \
				 NVMF_OPT_DISABLE_SQFLOW)

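/*
 * Parse a controller creation request, look up (and pin) the matching
 * transport module, and hand the validated options to its ->create_ctrl()
 * callback.
 */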
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below.  Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}

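/*
 * The /dev/nvme-fabrics misc device: userspace creates a controller by
 * writing an option string (e.g. "transport=rdma,traddr=1.2.3.4,nqn=testnqn";
 * the values here are only illustrative) and can then read back the
 * "instance=...,cntlid=..." pair of the newly created controller.
 */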
static struct class *nvmf_class;
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);

static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return ret;
}

static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
	if (IS_ERR(nvmf_class)) {
		pr_err("couldn't register class nvme-fabrics\n");
		ret = PTR_ERR(nvmf_class);
		goto out_free_host;
	}

	nvmf_device =
		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(nvmf_class, MKDEV(0, 0));
	class_destroy(nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
}

MODULE_LICENSE("GPL v2");

module_init(nvmf_init);
module_exit(nvmf_exit);