// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

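/*
 * Compute the Get Log Page transfer length: NUMDU/NUMDL form a zero-based
 * count of 32-bit dwords, so add one and scale to bytes.
 */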
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

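/*
 * Error Information log: walk the per-controller slot ring backwards from
 * the most recently used slot under error_lock, so entries are returned
 * newest-first.
 */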
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

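/*
 * SMART / Health Information for a single namespace: the counters come from
 * the block layer's part_stat accounting, with sector counts reported in
 * units of 1000 (rounded up). File-backed namespaces have no such stats and
 * are left zeroed.
 */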
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

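/*
 * Commands Supported and Effects log: only the low bit (command supported)
 * is set for the admin and I/O opcodes this target implements.
 */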
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

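/*
 * Changed Namespace List log: copy out the namespaces recorded since the
 * last read (a single entry when the list has overflowed, i.e.
 * nr_changed_ns == U32_MAX), zero the rest of the buffer, then reset the
 * list and clear the NS_ATTR AEN bit.
 */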
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

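/*
 * Build one ANA group descriptor. Unless the host requested "Return Groups
 * Only" (RGO), the NSIDs belonging to the group are appended after the
 * fixed-size descriptor header.
 */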
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
			req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

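/*
 * The model number is configurable per subsystem; fall back to the default
 * and space-pad the Identify field. The subsystem model is RCU-protected,
 * so it is sampled under rcu_read_lock().
 */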
static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
				      struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state. Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

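/*
 * Identify Namespace: the namespace is revalidated first so nsze, ncap and
 * the LBA format reflect the current size of the backing device or file; an
 * inactive NSID still succeeds and returns an all-zeroed structure.
 */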
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

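/*
 * Active Namespace ID list: report, in ascending order, the NSIDs greater
 * than the one given in the command, up to one Identify data buffer
 * (NVME_IDENTIFY_DATA_SIZE) worth of entries.
 */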
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
			req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even waiting for the command to be
 * executed; return immediately, indicating that the command to abort was
 * not found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

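/*
 * Set Features / Keep Alive Timer: stop the timer, round the new timeout
 * (given in milliseconds) up to whole seconds, and restart the timer with
 * the new value.
 */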
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

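/*
 * Asynchronous Event Request: the request is parked on the controller's
 * async_event_cmds array (bounded by NVMET_ASYNC_EVENTS) and completed
 * later from the async event work once an event is posted.
 */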
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

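/*
 * Admin command parser: fabrics, discovery and passthru commands are handed
 * off to their own parsers; everything else is dispatched by opcode via
 * req->execute.
 */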
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}