/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

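/*
 * Get Log Page length: NUMD is a 0's based dword count split across the
 * NUMDU/NUMDL command fields.  Worked example: numdu = 0x0001, numdl =
 * 0x0000 gives ((0x0001 << 16) + 0x0000 + 1) * sizeof(u32) = 262148 bytes.
 */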
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

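/*
 * Used for log pages we implement as all-zeroes (e.g. the error log and
 * the firmware slot log): just write the requested length of zeroes back.
 */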
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

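/*
 * Per-namespace SMART log: the I/O counters are taken from the block
 * layer partition statistics of the backing block device.
 */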
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		/* pr_fmt already prepends the "nvmet: " module prefix */
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

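/*
 * Controller-wide SMART log: sum the per-device counters over all block
 * device backed namespaces in the subsystem, under RCU protection.
 */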
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

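/*
 * NSID 0xffffffff (NVME_NSID_ALL) requests the controller-wide log; any
 * other NSID selects a single namespace.
 */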
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;

	if (req->data_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

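/*
 * Commands Supported and Effects log: bit 0 (CSUPP) of each 32-bit entry
 * marks the opcode as supported; we report no further side effects.
 */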
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

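/*
 * Changed Namespace List log: nr_changed_ns == U32_MAX marks an
 * overflowed list, in which case only the first (sentinel) entry is
 * returned; the rest of the buffer is zeroed either way.
 */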
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->data_len - len);
	ctrl->nr_changed_ns = 0;
	clear_bit(NVME_AEN_CFG_NS_ATTR, &ctrl->aen_masked);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

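/*
 * Build one ANA group descriptor.  If the host set the RGO bit (Return
 * Groups Only) the NSID list is omitted and only the group header is
 * filled in.
 */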
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

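/*
 * ANA log page: the group descriptors are copied out first, the header
 * is written last once the number of groups is known.
 */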
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	clear_bit(NVME_AEN_CFG_ANA_CHANGE, &req->sq->ctrl->aen_masked);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

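/*
 * Active Namespace ID list (CNS 02h): return the NSIDs greater than the
 * NSID in the command, up to one identify page worth of entries.
 */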
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

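/*
 * Emit a single Namespace Identification Descriptor: a (type, length)
 * header followed by the identifier payload, advancing *off past both.
 */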
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort was
 * not found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

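/* The Feature Identifier is carried in the low byte of CDW10. */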
static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		if (val32 & ~NVMET_AEN_CFG_ALL) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
		nvmet_set_result(req, val32);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

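/*
 * AER commands are queued until an event occurs; completions are
 * delivered later from the controller's async_event_work.
 */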
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

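/*
 * Admin command parsing: validate the controller state, then set
 * req->execute and req->data_len for the opcode; unknown opcodes are
 * rejected with Invalid Opcode.
 */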
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
			/*
			 * We currently never set the More bit in the status
			 * field, so all error log entries are invalid and can
			 * be zeroed out.  This is called a minimum viable
			 * implementation (TM) of this mandatory log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_SMART:
			req->execute = nvmet_execute_get_log_page_smart;
			return 0;
		case NVME_LOG_FW_SLOT:
			/*
			 * We only support a single firmware slot which always
			 * is active, so we can zero out the whole firmware slot
			 * log and still claim to fully implement this mandatory
			 * log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_CHANGED_NS:
			req->execute = nvmet_execute_get_log_changed_ns;
			return 0;
		case NVME_LOG_CMD_EFFECTS:
			req->execute = nvmet_execute_get_log_cmd_effects_ns;
			return 0;
		case NVME_LOG_ANA:
			req->execute = nvmet_execute_get_log_page_ana;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}