/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

/*
 * This helper allows us to clear the AEN based on the RAE bit.
 * Please use this helper when processing the log pages which are
 * associated with the AEN.
 */
static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
{
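	/*
	 * RAE (Retain Asynchronous Event) is bit 15 of Get Log Page CDW10;
	 * when it is clear the host wants the matching AEN rearmed.
	 */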
	int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & (1 << 15);

	if (!rae)
		clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
}

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);
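	/*
	 * Worked example (illustrative): NUMDU == 0x0001 and NUMDL == 0x0000
	 * describe 0x10001 dwords, so len is (0x10000 + 0 + 1) * 4 == 262148
	 * bytes.
	 */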

	return len;
}

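/*
 * Zero-fill the entire response buffer; used for mandatory log pages that we
 * implement as all zeroes (the error log and the firmware slot log below).
 */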
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id : %u\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;

	if (req->data_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
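	/*
	 * nr_changed_ns == U32_MAX means the changed namespace list
	 * overflowed; in that case the core is expected to have stored a
	 * single 0xffffffff entry, so report just that one dword.
	 */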
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->data_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

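/*
 * Fill in one ANA group descriptor: group id, namespace count, change count,
 * ANA state and, unless the host set the RGO (Return Groups Only) bit, the
 * NSIDs that belong to the group.  Returns the descriptor size in bytes.
 */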
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
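	/*
	 * LPA: per-namespace SMART log, the commands supported and effects
	 * log page, and extended data for Get Log Page.
	 */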
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

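	/*
	 * SQ/CQ entry sizes are fixed: 2^6 == 64 byte SQEs and 2^4 == 16 byte
	 * CQEs (each field encodes the maximum in the high nibble and the
	 * required size in the low nibble).
	 */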
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

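	/*
	 * ANACAP bits 0-4: the optimized, non-optimized, inaccessible,
	 * persistent loss and change states may all be reported.
	 */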
	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state. Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared. Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

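	/*
	 * Report the active NSIDs greater than the NSID in CDW1, at most
	 * buf_size / sizeof(__le32) == 1024 of them.
	 */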
	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

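/*
 * Append one Namespace Identification Descriptor: a 4 byte header holding
 * the type and length, followed by the identifier itself; *off is advanced
 * past both.
 */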
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
		void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work. We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

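/*
 * Flush the backend before reporting a namespace write protected, so that
 * everything written so far is on stable storage.
 */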
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns))
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u32 val32;
	u16 status = 0;

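	/* the Feature Identifier is the low byte of CDW10 */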
	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		if (val32 & ~NVMET_AEN_CFG_ALL) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
		nvmet_set_result(req, val32);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns)
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them. We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

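/*
 * Queue the AER command for later completion; a controller accepts at most
 * NVMET_ASYNC_EVENTS outstanding async event commands at any time.
 */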
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
			/*
			 * We currently never set the More bit in the status
			 * field, so all error log entries are invalid and can
			 * be zeroed out. This is called a minimum viable
			 * implementation (TM) of this mandatory log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_SMART:
			req->execute = nvmet_execute_get_log_page_smart;
			return 0;
		case NVME_LOG_FW_SLOT:
			/*
			 * We only support a single firmware slot which always
			 * is active, so we can zero out the whole firmware slot
			 * log and still claim to fully implement this mandatory
			 * log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_CHANGED_NS:
			req->execute = nvmet_execute_get_log_changed_ns;
			return 0;
		case NVME_LOG_CMD_EFFECTS:
			req->execute = nvmet_execute_get_log_cmd_effects_ns;
			return 0;
		case NVME_LOG_ANA:
			req->execute = nvmet_execute_get_log_page_ana;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}