// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"
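
/*
 * Fabrics Property Set: only 4-byte properties (attrib bit 0 clear) are
 * accepted, and the Controller Configuration (CC) register is the only
 * property that can be written.
 */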
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
        u64 val = le64_to_cpu(req->cmd->prop_set.value);
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        if (req->cmd->prop_set.attrib & 1) {
                req->error_loc =
                        offsetof(struct nvmf_property_set_command, attrib);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }

        switch (le32_to_cpu(req->cmd->prop_set.offset)) {
        case NVME_REG_CC:
                nvmet_update_cc(req->sq->ctrl, val);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvmf_property_set_command, offset);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }
out:
        nvmet_req_complete(req, status);
}
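
/*
 * Fabrics Property Get: CAP is readable as an 8-byte property; VS, CC
 * and CSTS are readable as 4-byte properties.  The value is returned in
 * the 64-bit completion result.
 */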
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = 0;
        u64 val = 0;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        if (req->cmd->prop_get.attrib & 1) {
                switch (le32_to_cpu(req->cmd->prop_get.offset)) {
                case NVME_REG_CAP:
                        val = ctrl->cap;
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        } else {
                switch (le32_to_cpu(req->cmd->prop_get.offset)) {
                case NVME_REG_VS:
                        val = ctrl->subsys->ver;
                        break;
                case NVME_REG_CC:
                        val = ctrl->cc;
                        break;
                case NVME_REG_CSTS:
                        val = ctrl->csts;
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        }

        if (status && req->cmd->prop_get.attrib & 1) {
                req->error_loc =
                        offsetof(struct nvmf_property_get_command, offset);
        } else {
                req->error_loc =
                        offsetof(struct nvmf_property_get_command, attrib);
        }

        req->cqe->result.u64 = cpu_to_le64(val);
        nvmet_req_complete(req, status);
}
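
/*
 * Parse fabrics command capsules arriving on an already connected queue;
 * only Property Set and Property Get are valid here.
 */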
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        switch (cmd->fabrics.fctype) {
        case nvme_fabrics_type_property_set:
                req->execute = nvmet_execute_prop_set;
                break;
        case nvme_fabrics_type_property_get:
                req->execute = nvmet_execute_prop_get;
                break;
        default:
                pr_err("received unknown capsule type 0x%x\n",
                        cmd->fabrics.fctype);
                req->error_loc = offsetof(struct nvmf_common_command, fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        return 0;
}
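
/*
 * Bind the controller to the queue pair this Connect command arrived on.
 * sqsize is a 0's-based value, hence the + 1 when sizing the CQ/SQ.  On
 * failure the sq->ctrl pointer is cleared again so the queue is left
 * unbound.
 */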
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        u16 qid = le16_to_cpu(c->qid);
        u16 sqsize = le16_to_cpu(c->sqsize);
        struct nvmet_ctrl *old;
        u16 ret;

        old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
        if (old) {
                pr_warn("queue already connected!\n");
                req->error_loc = offsetof(struct nvmf_connect_command, opcode);
                return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
        }
        if (!sqsize) {
                pr_warn("queue size zero!\n");
                req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
                ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                goto err;
        }

        /* note: convert queue size from 0's-based value to 1's-based value */
        nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
        nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);

        if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
                req->sq->sqhd_disabled = true;
                req->cqe->sq_head = cpu_to_le16(0xffff);
        }

        if (ctrl->ops->install_queue) {
                ret = ctrl->ops->install_queue(req->sq);
                if (ret) {
                        pr_err("failed to install queue %d cntlid %d ret %x\n",
                                qid, ctrl->cntlid, ret);
                        goto err;
                }
        }

        return 0;

err:
        req->sq->ctrl = NULL;
        return ret;
}
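
/*
 * Connect on the admin queue (qid 0): allocate a new controller for the
 * host NQN / subsystem NQN pair and return the assigned controller ID in
 * the completion result.
 */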
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        struct nvmf_connect_data *d;
        struct nvmet_ctrl *ctrl = NULL;
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
                return;

        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                status = NVME_SC_INTERNAL;
                goto complete;
        }

        status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
        if (status)
                goto out;

        /* zero out initial completion result, assign values as needed */
        req->cqe->result.u32 = 0;

        if (c->recfmt != 0) {
                pr_warn("invalid connect version (%d).\n",
                        le16_to_cpu(c->recfmt));
                req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
                status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
                goto out;
        }

        if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
                pr_warn("connect attempt for invalid controller ID %#x\n",
                        d->cntlid);
                status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
                goto out;
        }

        status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
                                  le32_to_cpu(c->kato), &ctrl);
        if (status) {
                if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
                        req->error_loc =
                                offsetof(struct nvme_common_command, opcode);
                goto out;
        }

        ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;

        uuid_copy(&ctrl->hostid, &d->hostid);

        status = nvmet_install_queue(ctrl, req);
        if (status) {
                nvmet_ctrl_put(ctrl);
                goto out;
        }

        pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
                ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
                ctrl->pi_support ? " T10-PI is enabled" : "");
        req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
        kfree(d);
complete:
        nvmet_req_complete(req, status);
}
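
/*
 * Connect on an I/O queue: look up the controller created by the prior
 * admin Connect (identified by the cntlid in the connect data) and
 * install this queue on it.
 */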
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        struct nvmf_connect_data *d;
        struct nvmet_ctrl *ctrl = NULL;
        u16 qid = le16_to_cpu(c->qid);
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
                return;

        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                status = NVME_SC_INTERNAL;
                goto complete;
        }

        status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
        if (status)
                goto out;

        /* zero out initial completion result, assign values as needed */
        req->cqe->result.u32 = 0;

        if (c->recfmt != 0) {
                pr_warn("invalid connect version (%d).\n",
                        le16_to_cpu(c->recfmt));
                status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
                goto out;
        }

        status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
                                     le16_to_cpu(d->cntlid),
                                     req, &ctrl);
        if (status)
                goto out;

        if (unlikely(qid > ctrl->subsys->max_qid)) {
                pr_warn("invalid queue id (%d)\n", qid);
                status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
                goto out_ctrl_put;
        }

        status = nvmet_install_queue(ctrl, req);
        if (status) {
                /* pass back cntlid that had the issue of installing queue */
                req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
                goto out_ctrl_put;
        }

        pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
        kfree(d);
complete:
        nvmet_req_complete(req, status);
        return;

out_ctrl_put:
        nvmet_ctrl_put(ctrl);
        goto out;
}
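
/*
 * Parse commands arriving on a queue that is not yet connected; only a
 * fabrics Connect capsule is accepted here.
 */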
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        if (!nvme_is_fabrics(cmd)) {
                pr_err("invalid command 0x%x on unconnected queue.\n",
                        cmd->fabrics.opcode);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
        if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
                pr_err("invalid capsule type 0x%x on unconnected queue.\n",
                        cmd->fabrics.fctype);
                req->error_loc = offsetof(struct nvmf_common_command, fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        if (cmd->connect.qid == 0)
                req->execute = nvmet_execute_admin_connect;
        else
                req->execute = nvmet_execute_io_connect;
        return 0;
}