// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

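/*
 * The Get Log Page transfer length (NUMD) is a 0's based dword count split
 * across the NUMDU and NUMDL command fields.  For example, numdu == 0x0001
 * and numdl == 0x0000 decode to 0x10001 dwords, i.e. a 0x40004 byte
 * transfer.
 */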
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

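/*
 * The error log is kept as a ring of NVMET_ERROR_LOG_SLOTS entries indexed
 * by err_counter.  Walk it backwards from the most recently used slot so
 * the entries are returned to the host newest first.
 */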
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

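/*
 * Per the NVMe spec, the SMART / Health data_units_* fields count in
 * thousands of 512-byte units, rounded up, hence the
 * DIV_ROUND_UP(sectors, 1000) conversions below.
 */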
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;

	req->ns = nvmet_find_namespace(req->sq->ctrl,
			req->cmd->get_log_page.nsid);
	if (!req->ns) {
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

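/*
 * A SMART log request with NSID 0xffffffff asks for the controller-wide
 * log, aggregated over all namespaces; any other NSID is reported per
 * namespace.
 */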
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

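/*
 * When more namespaces have changed than the log can hold, the core side
 * collapses the list to a single entry of 0xffffffff and marks this by
 * setting nr_changed_ns to U32_MAX, so only one dword is copied in that
 * case; the rest of the buffer is zero-filled either way.
 */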
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

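/*
 * Build one ANA group descriptor: the group header followed by the NSIDs
 * of all namespaces in the group.  With the RGO (Return Groups Only) bit
 * set in LSP the host only wants the headers, so the NSID list is skipped.
 */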
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

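/*
 * Common Get Log Page entry point: validate the transfer length against
 * NUMD first, then dispatch on the log identifier; unsupported LIDs fail
 * with Invalid Field.
 */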
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
			req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
				      struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
			NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

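	/*
	 * Fixed queue entry sizes, encoded as (max << 4) | required:
	 * 64-byte SQEs and 16-byte CQEs only (2^6 and 2^4 bytes).
	 */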
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state. Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
	if (!req->ns) {
		status = NVME_SC_INVALID_NS;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared. Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

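/*
 * The Namespace Identification Descriptor list is a 4KB buffer of
 * (nidt, nidl, payload) entries; zero-filling the remainder also provides
 * the terminating descriptor with NIDT == 0.
 */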
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	u16 status = 0;
	off_t off = 0;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
			req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work. We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed and immediately return that the command to abort wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

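/*
 * KATO is given in milliseconds in cdw11 but kept in seconds internally;
 * restart the keep-alive timer so the new value takes effect immediately.
 */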
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them. We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

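/*
 * Async Event Requests carry no data: they are parked on the controller
 * until an event fires and async_event_work completes them, with at most
 * NVMET_ASYNC_EVENTS commands outstanding at once.
 */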
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

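/*
 * Admin command dispatch: fabrics and discovery commands are routed to
 * their own parsers, the controller state is checked, and a passthru
 * controller takes precedence before the normal opcode switch below.
 */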
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
			req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}