blob: 5ce51ab4c50ea68030a969b995da73ee58efb978 [file] [log] [blame]
Christoph Hellwigfadccd82019-02-18 09:37:13 +01001/* SPDX-License-Identifier: GPL-2.0 */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002/*
3 * Definitions for the NVM Express interface
Matthew Wilcox8757ad62014-04-11 10:37:39 -04004 * Copyright (c) 2011-2014, Intel Corporation.
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05005 */
6
7#ifndef _LINUX_NVME_H
8#define _LINUX_NVME_H
9
Christoph Hellwig2812dfe2015-10-09 18:19:20 +020010#include <linux/types.h>
Christoph Hellwig8e412262017-05-17 09:54:27 +020011#include <linux/uuid.h>
Christoph Hellwigeb793e22016-06-13 16:45:25 +020012
13/* NQN names in commands fields specified one size */
14#define NVMF_NQN_FIELD_LEN 256
15
16/* However the max length of a qualified name is another size */
17#define NVMF_NQN_SIZE 223
18
19#define NVMF_TRSVCID_SIZE 32
20#define NVMF_TRADDR_SIZE 256
21#define NVMF_TSAS_SIZE 256
22
23#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
24
25#define NVME_RDMA_IP_PORT 4420
26
Arnav Dawn62346ea2017-07-12 16:11:53 +053027#define NVME_NSID_ALL 0xffffffff
28
Christoph Hellwigeb793e22016-06-13 16:45:25 +020029enum nvme_subsys_type {
30 NVME_NQN_DISC = 1, /* Discovery type target subsystem */
31 NVME_NQN_NVME = 2, /* NVME type target subsystem */
32};
33
34/* Address Family codes for Discovery Log Page entry ADRFAM field */
35enum {
36 NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
37 NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */
38 NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */
39 NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */
40 NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */
Chaitanya Kulkarnid02abd12020-05-04 01:56:48 -070041 NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */
42 NVMF_ADDR_FAMILY_MAX,
Christoph Hellwigeb793e22016-06-13 16:45:25 +020043};
44
45/* Transport Type codes for Discovery Log Page entry TRTYPE field */
46enum {
47 NVMF_TRTYPE_RDMA = 1, /* RDMA */
48 NVMF_TRTYPE_FC = 2, /* Fibre Channel */
Sagi Grimbergfc221d02018-12-03 17:52:14 -080049 NVMF_TRTYPE_TCP = 3, /* TCP/IP */
Christoph Hellwigeb793e22016-06-13 16:45:25 +020050 NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
51 NVMF_TRTYPE_MAX,
52};
53
54/* Transport Requirements codes for Discovery Log Page entry TREQ field */
55enum {
Sagi Grimberg9b95d2f2018-11-20 10:34:19 +010056 NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
57 NVMF_TREQ_REQUIRED = 1, /* Required */
58 NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
Sagi Grimberg0445e1b52018-11-19 14:11:13 -080059#define NVME_TREQ_SECURE_CHANNEL_MASK \
60 (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)
Sagi Grimberg9b95d2f2018-11-20 10:34:19 +010061
62 NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */
Christoph Hellwigeb793e22016-06-13 16:45:25 +020063};
64
65/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
66 * RDMA_QPTYPE field
67 */
68enum {
Roland Dreierbf17aa32017-03-01 18:22:01 -080069 NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
70 NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
Christoph Hellwigeb793e22016-06-13 16:45:25 +020071};
72
73/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
74 * RDMA_QPTYPE field
75 */
76enum {
Roland Dreierbf17aa32017-03-01 18:22:01 -080077 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
78 NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
79 NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
80 NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
81 NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
Christoph Hellwigeb793e22016-06-13 16:45:25 +020082};
83
84/* RDMA Connection Management Service Type codes for Discovery Log Page
85 * entry TSAS RDMA_CMS field
86 */
87enum {
Roland Dreierbf17aa32017-03-01 18:22:01 -080088 NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
Christoph Hellwigeb793e22016-06-13 16:45:25 +020089};
90
Sagi Grimberg7aa1f422017-06-18 16:15:59 +030091#define NVME_AQ_DEPTH 32
Keith Busch38dabe22017-11-07 15:13:10 -070092#define NVME_NR_AEN_COMMANDS 1
93#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
94
95/*
96 * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
97 * NVM-Express 1.2 specification, section 4.1.2.
98 */
99#define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1)
Christoph Hellwig2812dfe2015-10-09 18:19:20 +0200100
Christoph Hellwig7a67cbe2015-11-20 08:58:10 +0100101enum {
102 NVME_REG_CAP = 0x0000, /* Controller Capabilities */
103 NVME_REG_VS = 0x0008, /* Version */
104 NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
Wang Sheng-Huia5b714a2016-04-27 20:10:16 +0800105 NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
Christoph Hellwig7a67cbe2015-11-20 08:58:10 +0100106 NVME_REG_CC = 0x0014, /* Controller Configuration */
107 NVME_REG_CSTS = 0x001c, /* Controller Status */
108 NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
109 NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
110 NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
Wang Sheng-Huia5b714a2016-04-27 20:10:16 +0800111 NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600112 NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
Christoph Hellwig7a67cbe2015-11-20 08:58:10 +0100113 NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600114 NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
115 NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
116 NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
117 * Location
118 */
119 NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
120 NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
121 NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
122 NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
123 * Buffer Size
124 */
125 NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
126 * Write Throughput
127 */
Xu Yu97f6ef62017-05-24 16:39:55 +0800128 NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500129};
130
Keith Buscha0cadb82012-07-27 13:57:23 -0400131#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
Matthew Wilcox22605f92011-04-19 15:04:20 -0400132#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
Matthew Wilcoxf1938f62011-10-20 17:00:41 -0400133#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
Keith Buschdfbac8c2015-08-10 15:20:40 -0600134#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
Keith Busch8fc23e02012-07-26 11:29:57 -0600135#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
Keith Busch1d090622014-06-23 11:34:01 -0600136#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
Matthew Wilcox22605f92011-04-19 15:04:20 -0400137
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600138#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
139#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600140
Christoph Hellwig88de4592017-12-20 14:50:00 +0100141enum {
142 NVME_CMBSZ_SQS = 1 << 0,
143 NVME_CMBSZ_CQS = 1 << 1,
144 NVME_CMBSZ_LISTS = 1 << 2,
145 NVME_CMBSZ_RDS = 1 << 3,
146 NVME_CMBSZ_WDS = 1 << 4,
147
148 NVME_CMBSZ_SZ_SHIFT = 12,
149 NVME_CMBSZ_SZ_MASK = 0xfffff,
150
151 NVME_CMBSZ_SZU_SHIFT = 8,
152 NVME_CMBSZ_SZU_MASK = 0xf,
153};
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600154
Christoph Hellwig69cd27e2016-06-06 23:20:45 +0200155/*
156 * Submission and Completion Queue Entry Sizes for the NVM command set.
157 * (In bytes and specified as a power of two (2^n)).
158 */
Benjamin Herrenschmidtc1e0cc72019-08-07 17:51:20 +1000159#define NVME_ADM_SQES 6
Christoph Hellwig69cd27e2016-06-06 23:20:45 +0200160#define NVME_NVM_IOSQES 6
161#define NVME_NVM_IOCQES 4
162
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500163enum {
164 NVME_CC_ENABLE = 1 << 0,
165 NVME_CC_CSS_NVM = 0 << 4,
Max Gurtovoyad4e05b2017-08-13 19:21:06 +0300166 NVME_CC_EN_SHIFT = 0,
167 NVME_CC_CSS_SHIFT = 4,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500168 NVME_CC_MPS_SHIFT = 7,
Max Gurtovoyad4e05b2017-08-13 19:21:06 +0300169 NVME_CC_AMS_SHIFT = 11,
170 NVME_CC_SHN_SHIFT = 14,
171 NVME_CC_IOSQES_SHIFT = 16,
172 NVME_CC_IOCQES_SHIFT = 20,
Max Gurtovoy60b43f62017-08-13 19:21:07 +0300173 NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
174 NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
175 NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
Max Gurtovoyad4e05b2017-08-13 19:21:06 +0300176 NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
177 NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
178 NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
179 NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
180 NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
181 NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500182 NVME_CSTS_RDY = 1 << 0,
183 NVME_CSTS_CFS = 1 << 1,
Keith Buschdfbac8c2015-08-10 15:20:40 -0600184 NVME_CSTS_NSSRO = 1 << 4,
Arnav Dawnb6dccf72017-07-12 16:10:40 +0530185 NVME_CSTS_PP = 1 << 5,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500186 NVME_CSTS_SHST_NORMAL = 0 << 2,
187 NVME_CSTS_SHST_OCCUR = 1 << 2,
188 NVME_CSTS_SHST_CMPLT = 2 << 2,
Keith Busch1894d8f2013-07-15 15:02:22 -0600189 NVME_CSTS_SHST_MASK = 3 << 2,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500190};
191
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200192struct nvme_id_power_state {
193 __le16 max_power; /* centiwatts */
194 __u8 rsvd2;
195 __u8 flags;
196 __le32 entry_lat; /* microseconds */
197 __le32 exit_lat; /* microseconds */
198 __u8 read_tput;
199 __u8 read_lat;
200 __u8 write_tput;
201 __u8 write_lat;
202 __le16 idle_power;
203 __u8 idle_scale;
204 __u8 rsvd19;
205 __le16 active_power;
206 __u8 active_work_scale;
207 __u8 rsvd23[9];
208};
209
210enum {
211 NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0,
212 NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
213};
214
Sagi Grimberg12b21172018-11-02 10:28:12 -0700215enum nvme_ctrl_attr {
216 NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
Sagi Grimberg6e3ca03e2018-11-02 10:28:15 -0700217 NVME_CTRL_ATTR_TBKAS = (1 << 6),
Sagi Grimberg12b21172018-11-02 10:28:12 -0700218};
219
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200220struct nvme_id_ctrl {
221 __le16 vid;
222 __le16 ssvid;
223 char sn[20];
224 char mn[40];
225 char fr[8];
226 __u8 rab;
227 __u8 ieee[3];
Christoph Hellwiga446c082016-09-30 13:51:06 +0200228 __u8 cmic;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200229 __u8 mdts;
Christoph Hellwig08c69642015-10-02 15:27:16 +0200230 __le16 cntlid;
231 __le32 ver;
Christoph Hellwig14e974a2016-06-06 23:20:43 +0200232 __le32 rtd3r;
233 __le32 rtd3e;
234 __le32 oaes;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200235 __le32 ctratt;
Keith Busch49cd84b2018-11-27 09:40:57 -0700236 __u8 rsvd100[28];
237 __le16 crdt1;
238 __le16 crdt2;
239 __le16 crdt3;
240 __u8 rsvd134[122];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200241 __le16 oacs;
242 __u8 acl;
243 __u8 aerl;
244 __u8 frmw;
245 __u8 lpa;
246 __u8 elpe;
247 __u8 npss;
248 __u8 avscc;
249 __u8 apsta;
250 __le16 wctemp;
251 __le16 cctemp;
Christoph Hellwiga446c082016-09-30 13:51:06 +0200252 __le16 mtfa;
253 __le32 hmpre;
254 __le32 hmmin;
255 __u8 tnvmcap[16];
256 __u8 unvmcap[16];
257 __le32 rpmbs;
Guan Junxiong435e8092017-06-13 09:26:15 +0800258 __le16 edstt;
259 __u8 dsto;
260 __u8 fwug;
Sagi Grimberg7b89eae2016-06-13 16:45:27 +0200261 __le16 kas;
Guan Junxiong435e8092017-06-13 09:26:15 +0800262 __le16 hctma;
263 __le16 mntmt;
264 __le16 mxtmt;
265 __le32 sanicap;
Christoph Hellwig044a9df2017-09-11 12:09:28 -0400266 __le32 hmminds;
267 __le16 hmmaxd;
Christoph Hellwig1a376212018-05-13 18:53:57 +0200268 __u8 rsvd338[4];
269 __u8 anatt;
270 __u8 anacap;
271 __le32 anagrpmax;
272 __le32 nanagrpid;
273 __u8 rsvd352[160];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200274 __u8 sqes;
275 __u8 cqes;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200276 __le16 maxcmd;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200277 __le32 nn;
278 __le16 oncs;
279 __le16 fuses;
280 __u8 fna;
281 __u8 vwc;
282 __le16 awun;
283 __le16 awupf;
284 __u8 nvscc;
Chaitanya Kulkarni93045d52018-08-07 23:01:05 -0700285 __u8 nwpc;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200286 __le16 acwu;
287 __u8 rsvd534[2];
288 __le32 sgls;
Christoph Hellwig1a376212018-05-13 18:53:57 +0200289 __le32 mnan;
290 __u8 rsvd544[224];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200291 char subnqn[256];
292 __u8 rsvd1024[768];
293 __le32 ioccsz;
294 __le32 iorcsz;
295 __le16 icdoff;
296 __u8 ctrattr;
297 __u8 msdbd;
298 __u8 rsvd1804[244];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200299 struct nvme_id_power_state psd[32];
300 __u8 vs[1024];
301};
302
303enum {
Keith Busch92decf12020-04-03 10:53:46 -0700304 NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
305 NVME_CTRL_CMIC_ANA = 1 << 3,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200306 NVME_CTRL_ONCS_COMPARE = 1 << 0,
307 NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
308 NVME_CTRL_ONCS_DSM = 1 << 2,
Chaitanya Kulkarni3b7c33b2016-11-30 12:29:00 -0800309 NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
Jon Derrickdbf86b32017-08-16 09:51:29 +0200310 NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200311 NVME_CTRL_VWC_PRESENT = 1 << 0,
Scott Bauer8a9ae522017-02-17 13:59:40 +0100312 NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
Jens Axboef5d11842017-06-27 12:03:06 -0600313 NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
Changpeng Liu223694b2017-08-31 11:22:49 +0800314 NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
Keith Busch84fef622017-11-07 10:28:32 -0700315 NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600316 NVME_CTRL_CTRATT_128_ID = 1 << 0,
317 NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
318 NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
319 NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
320 NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
321 NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
322 NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
323 NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200324};
325
326struct nvme_lbaf {
327 __le16 ms;
328 __u8 ds;
329 __u8 rp;
330};
331
332struct nvme_id_ns {
333 __le64 nsze;
334 __le64 ncap;
335 __le64 nuse;
336 __u8 nsfeat;
337 __u8 nlbaf;
338 __u8 flbas;
339 __u8 mc;
340 __u8 dpc;
341 __u8 dps;
342 __u8 nmic;
343 __u8 rescap;
344 __u8 fpi;
Bart Van Assche6605bdd2019-06-28 09:53:29 -0700345 __u8 dlfeat;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200346 __le16 nawun;
347 __le16 nawupf;
348 __le16 nacwu;
349 __le16 nabsn;
350 __le16 nabo;
351 __le16 nabspf;
Scott Bauer6b8190d2017-06-15 10:44:30 -0600352 __le16 noiob;
Christoph Hellwiga446c082016-09-30 13:51:06 +0200353 __u8 nvmcap[16];
Bart Van Assche6605bdd2019-06-28 09:53:29 -0700354 __le16 npwg;
355 __le16 npwa;
356 __le16 npdg;
357 __le16 npda;
358 __le16 nows;
359 __u8 rsvd74[18];
Christoph Hellwig1a376212018-05-13 18:53:57 +0200360 __le32 anagrpid;
Chaitanya Kulkarni93045d52018-08-07 23:01:05 -0700361 __u8 rsvd96[3];
362 __u8 nsattr;
Bart Van Assche6605bdd2019-06-28 09:53:29 -0700363 __le16 nvmsetid;
364 __le16 endgid;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200365 __u8 nguid[16];
366 __u8 eui64[8];
367 struct nvme_lbaf lbaf[16];
368 __u8 rsvd192[192];
369 __u8 vs[3712];
370};
371
372enum {
Christoph Hellwig329dd762016-09-30 13:51:08 +0200373 NVME_ID_CNS_NS = 0x00,
374 NVME_ID_CNS_CTRL = 0x01,
375 NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
Johannes Thumshirnaf8b86e2017-06-07 11:45:30 +0200376 NVME_ID_CNS_NS_DESC_LIST = 0x03,
Christoph Hellwig329dd762016-09-30 13:51:08 +0200377 NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
378 NVME_ID_CNS_NS_PRESENT = 0x11,
379 NVME_ID_CNS_CTRL_NS_LIST = 0x12,
380 NVME_ID_CNS_CTRL_LIST = 0x13,
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600381 NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
382 NVME_ID_CNS_NS_GRANULARITY = 0x16,
383 NVME_ID_CNS_UUID_LIST = 0x17,
Christoph Hellwig329dd762016-09-30 13:51:08 +0200384};
385
386enum {
Jens Axboef5d11842017-06-27 12:03:06 -0600387 NVME_DIR_IDENTIFY = 0x00,
388 NVME_DIR_STREAMS = 0x01,
389 NVME_DIR_SND_ID_OP_ENABLE = 0x01,
390 NVME_DIR_SND_ST_OP_REL_ID = 0x01,
391 NVME_DIR_SND_ST_OP_REL_RSC = 0x02,
392 NVME_DIR_RCV_ID_OP_PARAM = 0x01,
393 NVME_DIR_RCV_ST_OP_PARAM = 0x01,
394 NVME_DIR_RCV_ST_OP_STATUS = 0x02,
395 NVME_DIR_RCV_ST_OP_RESOURCE = 0x03,
396 NVME_DIR_ENDIR = 0x01,
397};
398
399enum {
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200400 NVME_NS_FEAT_THIN = 1 << 0,
Keith Busch92decf12020-04-03 10:53:46 -0700401 NVME_NS_FEAT_ATOMICS = 1 << 1,
402 NVME_NS_FEAT_IO_OPT = 1 << 4,
403 NVME_NS_ATTR_RO = 1 << 0,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200404 NVME_NS_FLBAS_LBA_MASK = 0xf,
405 NVME_NS_FLBAS_META_EXT = 0x10,
Keith Busch92decf12020-04-03 10:53:46 -0700406 NVME_NS_NMIC_SHARED = 1 << 0,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200407 NVME_LBAF_RP_BEST = 0,
408 NVME_LBAF_RP_BETTER = 1,
409 NVME_LBAF_RP_GOOD = 2,
410 NVME_LBAF_RP_DEGRADED = 3,
411 NVME_NS_DPC_PI_LAST = 1 << 4,
412 NVME_NS_DPC_PI_FIRST = 1 << 3,
413 NVME_NS_DPC_PI_TYPE3 = 1 << 2,
414 NVME_NS_DPC_PI_TYPE2 = 1 << 1,
415 NVME_NS_DPC_PI_TYPE1 = 1 << 0,
416 NVME_NS_DPS_PI_FIRST = 1 << 3,
417 NVME_NS_DPS_PI_MASK = 0x7,
418 NVME_NS_DPS_PI_TYPE1 = 1,
419 NVME_NS_DPS_PI_TYPE2 = 2,
420 NVME_NS_DPS_PI_TYPE3 = 3,
421};
422
Israel Rukshin39481fb2020-05-19 17:06:00 +0300423/* Identify Namespace Metadata Capabilities (MC): */
424enum {
425 NVME_MC_EXTENDED_LBA = (1 << 0),
426 NVME_MC_METADATA_PTR = (1 << 1),
427};
428
Johannes Thumshirnaf8b86e2017-06-07 11:45:30 +0200429struct nvme_ns_id_desc {
430 __u8 nidt;
431 __u8 nidl;
432 __le16 reserved;
433};
434
435#define NVME_NIDT_EUI64_LEN 8
436#define NVME_NIDT_NGUID_LEN 16
437#define NVME_NIDT_UUID_LEN 16
438
439enum {
440 NVME_NIDT_EUI64 = 0x01,
441 NVME_NIDT_NGUID = 0x02,
442 NVME_NIDT_UUID = 0x03,
443};
444
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200445struct nvme_smart_log {
446 __u8 critical_warning;
447 __u8 temperature[2];
448 __u8 avail_spare;
449 __u8 spare_thresh;
450 __u8 percent_used;
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600451 __u8 endu_grp_crit_warn_sumry;
452 __u8 rsvd7[25];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200453 __u8 data_units_read[16];
454 __u8 data_units_written[16];
455 __u8 host_reads[16];
456 __u8 host_writes[16];
457 __u8 ctrl_busy_time[16];
458 __u8 power_cycles[16];
459 __u8 power_on_hours[16];
460 __u8 unsafe_shutdowns[16];
461 __u8 media_errors[16];
462 __u8 num_err_log_entries[16];
463 __le32 warning_temp_time;
464 __le32 critical_comp_time;
465 __le16 temp_sensor[8];
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600466 __le32 thm_temp1_trans_count;
467 __le32 thm_temp2_trans_count;
468 __le32 thm_temp1_total_time;
469 __le32 thm_temp2_total_time;
470 __u8 rsvd232[280];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200471};
472
Arnav Dawnb6dccf72017-07-12 16:10:40 +0530473struct nvme_fw_slot_info_log {
474 __u8 afi;
475 __u8 rsvd1[7];
476 __le64 frs[7];
477 __u8 rsvd64[448];
478};
479
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200480enum {
Keith Busch84fef622017-11-07 10:28:32 -0700481 NVME_CMD_EFFECTS_CSUPP = 1 << 0,
482 NVME_CMD_EFFECTS_LBCC = 1 << 1,
483 NVME_CMD_EFFECTS_NCC = 1 << 2,
484 NVME_CMD_EFFECTS_NIC = 1 << 3,
485 NVME_CMD_EFFECTS_CCC = 1 << 4,
486 NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600487 NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
Keith Busch84fef622017-11-07 10:28:32 -0700488};
489
490struct nvme_effects_log {
491 __le32 acs[256];
492 __le32 iocs[256];
493 __u8 resv[2048];
494};
495
Christoph Hellwig1a376212018-05-13 18:53:57 +0200496enum nvme_ana_state {
497 NVME_ANA_OPTIMIZED = 0x01,
498 NVME_ANA_NONOPTIMIZED = 0x02,
499 NVME_ANA_INACCESSIBLE = 0x03,
500 NVME_ANA_PERSISTENT_LOSS = 0x04,
501 NVME_ANA_CHANGE = 0x0f,
502};
503
504struct nvme_ana_group_desc {
505 __le32 grpid;
506 __le32 nnsids;
507 __le64 chgcnt;
508 __u8 state;
Hannes Reinecke8b92d0e2018-08-08 08:35:29 +0200509 __u8 rsvd17[15];
Christoph Hellwig1a376212018-05-13 18:53:57 +0200510 __le32 nsids[];
511};
512
513/* flag for the log specific field of the ANA log */
514#define NVME_ANA_LOG_RGO (1 << 0)
515
516struct nvme_ana_rsp_hdr {
517 __le64 chgcnt;
518 __le16 ngrps;
519 __le16 rsvd10[3];
520};
521
Keith Busch84fef622017-11-07 10:28:32 -0700522enum {
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200523 NVME_SMART_CRIT_SPARE = 1 << 0,
524 NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
525 NVME_SMART_CRIT_RELIABILITY = 1 << 2,
526 NVME_SMART_CRIT_MEDIA = 1 << 3,
527 NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
528};
529
530enum {
Keith Busche3d78742017-11-07 15:13:14 -0700531 NVME_AER_ERROR = 0,
532 NVME_AER_SMART = 1,
Christoph Hellwig868c2392018-05-22 11:09:54 +0200533 NVME_AER_NOTICE = 2,
Keith Busche3d78742017-11-07 15:13:14 -0700534 NVME_AER_CSS = 6,
535 NVME_AER_VS = 7,
Christoph Hellwig868c2392018-05-22 11:09:54 +0200536};
537
538enum {
539 NVME_AER_NOTICE_NS_CHANGED = 0x00,
540 NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
Christoph Hellwig1a376212018-05-13 18:53:57 +0200541 NVME_AER_NOTICE_ANA = 0x03,
Jay Sternbergf301c2b2018-11-12 13:56:37 -0800542 NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200543};
544
Hannes Reineckeaafd3af2018-05-25 17:34:00 +0200545enum {
Jay Sternberg7114dde2018-11-12 13:56:34 -0800546 NVME_AEN_BIT_NS_ATTR = 8,
547 NVME_AEN_BIT_FW_ACT = 9,
548 NVME_AEN_BIT_ANA_CHANGE = 11,
Jay Sternbergf301c2b2018-11-12 13:56:37 -0800549 NVME_AEN_BIT_DISC_CHANGE = 31,
Jay Sternberg7114dde2018-11-12 13:56:34 -0800550};
551
552enum {
553 NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
554 NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
555 NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
Jay Sternbergf301c2b2018-11-12 13:56:37 -0800556 NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
Hannes Reineckeaafd3af2018-05-25 17:34:00 +0200557};
558
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200559struct nvme_lba_range_type {
560 __u8 type;
561 __u8 attributes;
562 __u8 rsvd2[14];
563 __u64 slba;
564 __u64 nlb;
565 __u8 guid[16];
566 __u8 rsvd48[16];
567};
568
569enum {
570 NVME_LBART_TYPE_FS = 0x01,
571 NVME_LBART_TYPE_RAID = 0x02,
572 NVME_LBART_TYPE_CACHE = 0x03,
573 NVME_LBART_TYPE_SWAP = 0x04,
574
575 NVME_LBART_ATTRIB_TEMP = 1 << 0,
576 NVME_LBART_ATTRIB_HIDE = 1 << 1,
577};
578
579struct nvme_reservation_status {
580 __le32 gen;
581 __u8 rtype;
582 __u8 regctl[2];
583 __u8 resv5[2];
584 __u8 ptpls;
585 __u8 resv10[13];
586 struct {
587 __le16 cntlid;
588 __u8 rcsts;
589 __u8 resv3[5];
590 __le64 hostid;
591 __le64 rkey;
592 } regctl_ds[];
593};
594
Christoph Hellwig79f370e2016-06-06 23:20:46 +0200595enum nvme_async_event_type {
596 NVME_AER_TYPE_ERROR = 0,
597 NVME_AER_TYPE_SMART = 1,
598 NVME_AER_TYPE_NOTICE = 2,
599};
600
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200601/* I/O commands */
602
603enum nvme_opcode {
604 nvme_cmd_flush = 0x00,
605 nvme_cmd_write = 0x01,
606 nvme_cmd_read = 0x02,
607 nvme_cmd_write_uncor = 0x04,
608 nvme_cmd_compare = 0x05,
609 nvme_cmd_write_zeroes = 0x08,
610 nvme_cmd_dsm = 0x09,
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600611 nvme_cmd_verify = 0x0c,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200612 nvme_cmd_resv_register = 0x0d,
613 nvme_cmd_resv_report = 0x0e,
614 nvme_cmd_resv_acquire = 0x11,
615 nvme_cmd_resv_release = 0x15,
616};
617
Minwoo Im26f29902019-06-12 21:45:30 +0900618#define nvme_opcode_name(opcode) { opcode, #opcode }
619#define show_nvm_opcode_name(val) \
620 __print_symbolic(val, \
621 nvme_opcode_name(nvme_cmd_flush), \
622 nvme_opcode_name(nvme_cmd_write), \
623 nvme_opcode_name(nvme_cmd_read), \
624 nvme_opcode_name(nvme_cmd_write_uncor), \
625 nvme_opcode_name(nvme_cmd_compare), \
626 nvme_opcode_name(nvme_cmd_write_zeroes), \
627 nvme_opcode_name(nvme_cmd_dsm), \
628 nvme_opcode_name(nvme_cmd_resv_register), \
629 nvme_opcode_name(nvme_cmd_resv_report), \
630 nvme_opcode_name(nvme_cmd_resv_acquire), \
631 nvme_opcode_name(nvme_cmd_resv_release))
632
633
James Smart3972be22016-06-06 23:20:47 +0200634/*
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200635 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
636 *
637 * @NVME_SGL_FMT_ADDRESS: absolute address of the data block
638 * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
James Smartd85cf202017-09-07 13:20:23 -0700639 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200640 * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
641 * request subtype
642 */
643enum {
644 NVME_SGL_FMT_ADDRESS = 0x00,
645 NVME_SGL_FMT_OFFSET = 0x01,
James Smartd85cf202017-09-07 13:20:23 -0700646 NVME_SGL_FMT_TRANSPORT_A = 0x0A,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200647 NVME_SGL_FMT_INVALIDATE = 0x0f,
648};
649
650/*
651 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
652 *
653 * For struct nvme_sgl_desc:
654 * @NVME_SGL_FMT_DATA_DESC: data block descriptor
655 * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor
656 * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
657 *
658 * For struct nvme_keyed_sgl_desc:
659 * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
James Smartd85cf202017-09-07 13:20:23 -0700660 *
661 * Transport-specific SGL types:
662 * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200663 */
664enum {
665 NVME_SGL_FMT_DATA_DESC = 0x00,
666 NVME_SGL_FMT_SEG_DESC = 0x02,
667 NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
668 NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
James Smartd85cf202017-09-07 13:20:23 -0700669 NVME_TRANSPORT_SGL_DATA_DESC = 0x05,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200670};
671
672struct nvme_sgl_desc {
673 __le64 addr;
674 __le32 length;
675 __u8 rsvd[3];
676 __u8 type;
677};
678
679struct nvme_keyed_sgl_desc {
680 __le64 addr;
681 __u8 length[3];
682 __u8 key[4];
683 __u8 type;
684};
685
686union nvme_data_ptr {
687 struct {
688 __le64 prp1;
689 __le64 prp2;
690 };
691 struct nvme_sgl_desc sgl;
692 struct nvme_keyed_sgl_desc ksgl;
693};
694
695/*
James Smart3972be22016-06-06 23:20:47 +0200696 * Lowest two bits of our flags field (FUSE field in the spec):
697 *
698 * @NVME_CMD_FUSE_FIRST: Fused Operation, first command
699 * @NVME_CMD_FUSE_SECOND: Fused Operation, second command
700 *
701 * Highest two bits in our flags field (PSDT field in the spec):
702 *
703 * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer,
704 * If used, MPTR contains addr of single physical buffer (byte aligned).
705 * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer,
706 * If used, MPTR contains an address of an SGL segment containing
707 * exactly 1 SGL descriptor (qword aligned).
708 */
709enum {
710 NVME_CMD_FUSE_FIRST = (1 << 0),
711 NVME_CMD_FUSE_SECOND = (1 << 1),
712
713 NVME_CMD_SGL_METABUF = (1 << 6),
714 NVME_CMD_SGL_METASEG = (1 << 7),
715 NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
716};
717
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200718struct nvme_common_command {
719 __u8 opcode;
720 __u8 flags;
721 __u16 command_id;
722 __le32 nsid;
723 __le32 cdw2[2];
724 __le64 metadata;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200725 union nvme_data_ptr dptr;
Chaitanya Kulkarnib7c8f362018-12-12 15:11:37 -0800726 __le32 cdw10;
727 __le32 cdw11;
728 __le32 cdw12;
729 __le32 cdw13;
730 __le32 cdw14;
731 __le32 cdw15;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200732};
733
734struct nvme_rw_command {
735 __u8 opcode;
736 __u8 flags;
737 __u16 command_id;
738 __le32 nsid;
739 __u64 rsvd2;
740 __le64 metadata;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200741 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200742 __le64 slba;
743 __le16 length;
744 __le16 control;
745 __le32 dsmgmt;
746 __le32 reftag;
747 __le16 apptag;
748 __le16 appmask;
749};
750
751enum {
752 NVME_RW_LR = 1 << 15,
753 NVME_RW_FUA = 1 << 14,
754 NVME_RW_DSM_FREQ_UNSPEC = 0,
755 NVME_RW_DSM_FREQ_TYPICAL = 1,
756 NVME_RW_DSM_FREQ_RARE = 2,
757 NVME_RW_DSM_FREQ_READS = 3,
758 NVME_RW_DSM_FREQ_WRITES = 4,
759 NVME_RW_DSM_FREQ_RW = 5,
760 NVME_RW_DSM_FREQ_ONCE = 6,
761 NVME_RW_DSM_FREQ_PREFETCH = 7,
762 NVME_RW_DSM_FREQ_TEMP = 8,
763 NVME_RW_DSM_LATENCY_NONE = 0 << 4,
764 NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
765 NVME_RW_DSM_LATENCY_NORM = 2 << 4,
766 NVME_RW_DSM_LATENCY_LOW = 3 << 4,
767 NVME_RW_DSM_SEQ_REQ = 1 << 6,
768 NVME_RW_DSM_COMPRESSED = 1 << 7,
769 NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
770 NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
771 NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
772 NVME_RW_PRINFO_PRACT = 1 << 13,
Jens Axboef5d11842017-06-27 12:03:06 -0600773 NVME_RW_DTYPE_STREAMS = 1 << 4,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200774};
775
776struct nvme_dsm_cmd {
777 __u8 opcode;
778 __u8 flags;
779 __u16 command_id;
780 __le32 nsid;
781 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200782 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200783 __le32 nr;
784 __le32 attributes;
785 __u32 rsvd12[4];
786};
787
788enum {
789 NVME_DSMGMT_IDR = 1 << 0,
790 NVME_DSMGMT_IDW = 1 << 1,
791 NVME_DSMGMT_AD = 1 << 2,
792};
793
Christoph Hellwigb35ba012017-02-08 14:46:50 +0100794#define NVME_DSM_MAX_RANGES 256
795
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200796struct nvme_dsm_range {
797 __le32 cattr;
798 __le32 nlb;
799 __le64 slba;
800};
801
Chaitanya Kulkarni3b7c33b2016-11-30 12:29:00 -0800802struct nvme_write_zeroes_cmd {
803 __u8 opcode;
804 __u8 flags;
805 __u16 command_id;
806 __le32 nsid;
807 __u64 rsvd2;
808 __le64 metadata;
809 union nvme_data_ptr dptr;
810 __le64 slba;
811 __le16 length;
812 __le16 control;
813 __le32 dsmgmt;
814 __le32 reftag;
815 __le16 apptag;
816 __le16 appmask;
817};
818
Andy Lutomirskic5552fd2017-02-07 10:08:45 -0800819/* Features */
820
Akinobu Mita52deba02019-11-15 00:40:00 +0900821enum {
822 NVME_TEMP_THRESH_MASK = 0xffff,
823 NVME_TEMP_THRESH_SELECT_SHIFT = 16,
824 NVME_TEMP_THRESH_TYPE_UNDER = 0x100000,
825};
826
Andy Lutomirskic5552fd2017-02-07 10:08:45 -0800827struct nvme_feat_auto_pst {
828 __le64 entries[32];
829};
830
Christoph Hellwig39673e12017-01-09 15:36:28 +0100831enum {
832 NVME_HOST_MEM_ENABLE = (1 << 0),
833 NVME_HOST_MEM_RETURN = (1 << 1),
834};
835
Keith Busch49cd84b2018-11-27 09:40:57 -0700836struct nvme_feat_host_behavior {
837 __u8 acre;
838 __u8 resv1[511];
839};
840
841enum {
842 NVME_ENABLE_ACRE = 1,
843};
844
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200845/* Admin commands */
846
847enum nvme_admin_opcode {
848 nvme_admin_delete_sq = 0x00,
849 nvme_admin_create_sq = 0x01,
850 nvme_admin_get_log_page = 0x02,
851 nvme_admin_delete_cq = 0x04,
852 nvme_admin_create_cq = 0x05,
853 nvme_admin_identify = 0x06,
854 nvme_admin_abort_cmd = 0x08,
855 nvme_admin_set_features = 0x09,
856 nvme_admin_get_features = 0x0a,
857 nvme_admin_async_event = 0x0c,
Christoph Hellwiga446c082016-09-30 13:51:06 +0200858 nvme_admin_ns_mgmt = 0x0d,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200859 nvme_admin_activate_fw = 0x10,
860 nvme_admin_download_fw = 0x11,
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600861 nvme_admin_dev_self_test = 0x14,
Christoph Hellwiga446c082016-09-30 13:51:06 +0200862 nvme_admin_ns_attach = 0x15,
Sagi Grimberg7b89eae2016-06-13 16:45:27 +0200863 nvme_admin_keep_alive = 0x18,
Jens Axboef5d11842017-06-27 12:03:06 -0600864 nvme_admin_directive_send = 0x19,
865 nvme_admin_directive_recv = 0x1a,
Revanth Rajashekar48c9e852019-10-14 11:16:07 -0600866 nvme_admin_virtual_mgmt = 0x1c,
867 nvme_admin_nvme_mi_send = 0x1d,
868 nvme_admin_nvme_mi_recv = 0x1e,
Helen Koikef9f38e32017-04-10 12:51:07 -0300869 nvme_admin_dbbuf = 0x7C,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200870 nvme_admin_format_nvm = 0x80,
871 nvme_admin_security_send = 0x81,
872 nvme_admin_security_recv = 0x82,
Keith Busch84fef622017-11-07 10:28:32 -0700873 nvme_admin_sanitize_nvm = 0x84,
Minwoo Imc6389842019-08-04 16:50:47 +0900874 nvme_admin_get_lba_status = 0x86,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200875};
876
/*
 * Trace-event helpers that map an admin opcode to its symbolic name via
 * __print_symbolic().  Keep this table in sync with enum nvme_admin_opcode;
 * opcodes missing here are printed as raw numbers in the trace output.
 *
 * Fix: nvme_admin_dev_self_test, nvme_admin_virtual_mgmt,
 * nvme_admin_nvme_mi_send and nvme_admin_nvme_mi_recv are declared in the
 * enum above but were missing from this table.
 */
#define nvme_admin_opcode_name(opcode)	{ opcode, #opcode }
#define show_admin_opcode_name(val)					\
	__print_symbolic(val,						\
		nvme_admin_opcode_name(nvme_admin_delete_sq),		\
		nvme_admin_opcode_name(nvme_admin_create_sq),		\
		nvme_admin_opcode_name(nvme_admin_get_log_page),	\
		nvme_admin_opcode_name(nvme_admin_delete_cq),		\
		nvme_admin_opcode_name(nvme_admin_create_cq),		\
		nvme_admin_opcode_name(nvme_admin_identify),		\
		nvme_admin_opcode_name(nvme_admin_abort_cmd),		\
		nvme_admin_opcode_name(nvme_admin_set_features),	\
		nvme_admin_opcode_name(nvme_admin_get_features),	\
		nvme_admin_opcode_name(nvme_admin_async_event),		\
		nvme_admin_opcode_name(nvme_admin_ns_mgmt),		\
		nvme_admin_opcode_name(nvme_admin_activate_fw),		\
		nvme_admin_opcode_name(nvme_admin_download_fw),		\
		nvme_admin_opcode_name(nvme_admin_dev_self_test),	\
		nvme_admin_opcode_name(nvme_admin_ns_attach),		\
		nvme_admin_opcode_name(nvme_admin_keep_alive),		\
		nvme_admin_opcode_name(nvme_admin_directive_send),	\
		nvme_admin_opcode_name(nvme_admin_directive_recv),	\
		nvme_admin_opcode_name(nvme_admin_virtual_mgmt),	\
		nvme_admin_opcode_name(nvme_admin_nvme_mi_send),	\
		nvme_admin_opcode_name(nvme_admin_nvme_mi_recv),	\
		nvme_admin_opcode_name(nvme_admin_dbbuf),		\
		nvme_admin_opcode_name(nvme_admin_format_nvm),		\
		nvme_admin_opcode_name(nvme_admin_security_send),	\
		nvme_admin_opcode_name(nvme_admin_security_recv),	\
		nvme_admin_opcode_name(nvme_admin_sanitize_nvm),	\
		nvme_admin_opcode_name(nvme_admin_get_lba_status))
Minwoo Im26f29902019-06-12 21:45:30 +0900903
enum {
	/* Create I/O CQ/SQ command flags (cdw11) */
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	/* Feature identifiers for Set/Get Features (cdw10 FID) */
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_TIMESTAMP	= 0x0e,
	NVME_FEAT_KATO		= 0x0f,	/* keep-alive timeout */
	NVME_FEAT_HCTM		= 0x10,
	NVME_FEAT_NOPSC		= 0x11,
	NVME_FEAT_RRL		= 0x12,
	NVME_FEAT_PLM_CONFIG	= 0x13,
	NVME_FEAT_PLM_WINDOW	= 0x14,
	NVME_FEAT_HOST_BEHAVIOR	= 0x16,
	NVME_FEAT_SANITIZE	= 0x17,
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
	NVME_FEAT_RESV_MASK	= 0x82,
	NVME_FEAT_RESV_PERSIST	= 0x83,
	NVME_FEAT_WRITE_PROTECT	= 0x84,
	/* Log page identifiers for Get Log Page (cdw10 LID) */
	NVME_LOG_ERROR		= 0x01,
	NVME_LOG_SMART		= 0x02,
	NVME_LOG_FW_SLOT	= 0x03,
	NVME_LOG_CHANGED_NS	= 0x04,
	NVME_LOG_CMD_EFFECTS	= 0x05,
	NVME_LOG_DEVICE_SELF_TEST = 0x06,
	NVME_LOG_TELEMETRY_HOST	= 0x07,
	NVME_LOG_TELEMETRY_CTRL	= 0x08,
	NVME_LOG_ENDURANCE_GROUP = 0x09,
	NVME_LOG_ANA		= 0x0c,	/* asymmetric namespace access */
	NVME_LOG_DISC		= 0x70,	/* Fabrics discovery log */
	NVME_LOG_RESERVATION	= 0x80,
	/* Firmware Commit (Activate) commit-action field, cdw10 bits 5:3 */
	NVME_FWACT_REPL		= (0 << 3),
	NVME_FWACT_REPL_ACTV	= (1 << 3),
	NVME_FWACT_ACTV		= (2 << 3),
};
954
/* NVMe Namespace Write Protect State (value for NVME_FEAT_WRITE_PROTECT) */
enum {
	NVME_NS_NO_WRITE_PROTECT = 0,		/* namespace is writable */
	NVME_NS_WRITE_PROTECT,			/* until next state change */
	NVME_NS_WRITE_PROTECT_POWER_CYCLE,	/* until next power cycle */
	NVME_NS_WRITE_PROTECT_PERMANENT,	/* cannot be cleared */
};

/* Maximum number of entries in the Changed Namespace List log page */
#define NVME_MAX_CHANGED_NAMESPACES	1024
964
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200965struct nvme_identify {
966 __u8 opcode;
967 __u8 flags;
968 __u16 command_id;
969 __le32 nsid;
970 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200971 union nvme_data_ptr dptr;
Parav Pandit986994a2017-01-26 17:17:28 +0200972 __u8 cns;
973 __u8 rsvd3;
974 __le16 ctrlid;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200975 __u32 rsvd11[5];
976};
977
Johannes Thumshirn0add5e82017-06-07 11:45:29 +0200978#define NVME_IDENTIFY_DATA_SIZE 4096
979
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200980struct nvme_features {
981 __u8 opcode;
982 __u8 flags;
983 __u16 command_id;
984 __le32 nsid;
985 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200986 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200987 __le32 fid;
988 __le32 dword11;
Arnav Dawnb85cf732017-05-12 17:12:03 +0200989 __le32 dword12;
990 __le32 dword13;
991 __le32 dword14;
992 __le32 dword15;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200993};
994
Christoph Hellwig39673e12017-01-09 15:36:28 +0100995struct nvme_host_mem_buf_desc {
996 __le64 addr;
997 __le32 size;
998 __u32 rsvd;
999};
1000
/* Create I/O Completion Queue admin command */
struct nvme_create_cq {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__u32		rsvd1[5];
	__le64		prp1;		/* queue memory (physically contiguous or PRP list) */
	__u64		rsvd8;
	__le16		cqid;		/* identifier of the queue to create */
	__le16		qsize;		/* queue size, 0's based */
	__le16		cq_flags;	/* NVME_QUEUE_PHYS_CONTIG / NVME_CQ_IRQ_ENABLED */
	__le16		irq_vector;	/* MSI/MSI-X vector for this CQ */
	__u32		rsvd12[4];
};

/* Create I/O Submission Queue admin command */
struct nvme_create_sq {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__u32		rsvd1[5];
	__le64		prp1;		/* queue memory */
	__u64		rsvd8;
	__le16		sqid;		/* identifier of the queue to create */
	__le16		qsize;		/* queue size, 0's based */
	__le16		sq_flags;	/* NVME_QUEUE_PHYS_CONTIG / NVME_SQ_PRIO_* */
	__le16		cqid;		/* completion queue to post completions to */
	__u32		rsvd12[4];
};

/* Delete I/O Submission/Completion Queue admin command */
struct nvme_delete_queue {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__u32		rsvd1[9];
	__le16		qid;		/* queue to delete */
	__u16		rsvd10;
	__u32		rsvd11[5];
};

/* Abort admin command: cancel command `cid` on submission queue `sqid` */
struct nvme_abort_cmd {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__u32		rsvd1[9];
	__le16		sqid;
	__u16		cid;
	__u32		rsvd11[5];
};
1048
/* Firmware Image Download admin command */
struct nvme_download_firmware {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__u32		rsvd1[5];
	union nvme_data_ptr dptr;
	__le32		numd;		/* number of dwords to transfer, 0's based */
	__le32		offset;		/* dword offset within the firmware image */
	__u32		rsvd12[4];
};

/* Format NVM admin command; format parameters are packed into cdw10 */
struct nvme_format_cmd {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__le32		nsid;
	__u64		rsvd2[4];
	__le32		cdw10;		/* LBAF/MSET/PI/PIL/SES fields */
	__u32		rsvd11[5];
};
1069
/* Get Log Page admin command */
struct nvme_get_log_page_command {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__le32		nsid;
	__u64		rsvd2[2];
	union nvme_data_ptr dptr;
	__u8		lid;		/* log page identifier (NVME_LOG_*) */
	__u8		lsp;		/* log specific field; upper 4 bits reserved */
	__le16		numdl;		/* number of dwords, lower 16 bits (0's based) */
	__le16		numdu;		/* number of dwords, upper 16 bits */
	__u16		rsvd11;
	union {
		struct {
			__le32 lpol;	/* log page offset, lower 32 bits */
			__le32 lpou;	/* log page offset, upper 32 bits */
		};
		__le64 lpo;		/* same offset addressed as one 64-bit field */
	};
	__u32		rsvd14[2];
};
1091
/* Directive Send / Directive Receive admin command (e.g. Streams) */
struct nvme_directive_cmd {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__le32		nsid;
	__u64		rsvd2[2];
	union nvme_data_ptr dptr;
	__le32		numd;		/* number of dwords, 0's based */
	__u8		doper;		/* directive operation */
	__u8		dtype;		/* directive type */
	__le16		dspec;		/* directive specific */
	__u8		endir;		/* enable directive (send only) */
	__u8		tdtype;		/* target directive type (send only) */
	__u16		rsvd15;

	__u32		rsvd16[3];
};
1109
/*
 * Fabrics subcommands.
 */

/* All NVMe-oF command capsules share this single reserved opcode */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command		= 0x7f,
};

/* Fabrics command types, carried in the capsule's fctype field */
enum nvmf_capsule_command {
	nvme_fabrics_type_property_set	= 0x00,
	nvme_fabrics_type_connect	= 0x01,
	nvme_fabrics_type_property_get	= 0x04,
};
1122
/* Trace-event helpers: map a fabrics command type to its symbolic name */
#define nvme_fabrics_type_name(type)	{ type, #type }
#define show_fabrics_type_name(type)					\
	__print_symbolic(type,						\
		nvme_fabrics_type_name(nvme_fabrics_type_property_set),	\
		nvme_fabrics_type_name(nvme_fabrics_type_connect),	\
		nvme_fabrics_type_name(nvme_fabrics_type_property_get))

/*
 * If not fabrics command, fctype will be ignored.  Admin and I/O opcodes
 * share the same numeric space, so the queue id selects the right table.
 */
#define show_opcode_name(qid, opcode, fctype)			\
	((opcode) == nvme_fabrics_command ?			\
	 show_fabrics_type_name(fctype) :			\
	 ((qid) ?						\
	  show_nvm_opcode_name(opcode) :			\
	  show_admin_opcode_name(opcode)))
1139
/* Common layout of all 64-byte Fabrics command capsules */
struct nvmf_common_command {
	__u8	opcode;		/* always nvme_fabrics_command */
	__u8	resv1;
	__u16	command_id;
	__u8	fctype;		/* fabrics command type (nvmf_capsule_command) */
	__u8	resv2[35];
	__u8	ts[24];		/* transport specific */
};

/*
 * The legal cntlid range a NVMe Target will provide.
 * Note that cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff	/* host asks target to pick a cntlid */

/* Maximum number of discovery log entries fetched per pass */
#define MAX_DISC_LOGS	255
1160
/* Discovery log page entry (1024 bytes) */
struct nvmf_disc_rsp_page_entry {
	__u8		trtype;		/* transport type (NVMF_TRTYPE_*) */
	__u8		adrfam;		/* address family (NVMF_ADDR_FAMILY_*) */
	__u8		subtype;	/* subsystem type (NVME_NQN_*) */
	__u8		treq;		/* transport requirements */
	__le16		portid;
	__le16		cntlid;
	__le16		asqsz;		/* admin max submission queue size */
	__u8		resv8[22];
	char		trsvcid[NVMF_TRSVCID_SIZE];	/* transport service id, e.g. IP port */
	__u8		resv64[192];
	char		subnqn[NVMF_NQN_FIELD_LEN];
	char		traddr[NVMF_TRADDR_SIZE];
	/* transport specific address subtype */
	union tsas {
		char		common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8	qptype;		/* RDMA QP service type */
			__u8	prtype;		/* RDMA provider type */
			__u8	cms;		/* RDMA connection management service */
			__u8	resv3[5];
			__u16	pkey;		/* RDMA partition key */
			__u8	resv10[246];
		} rdma;
	} tsas;
};

/* Discovery log page header, followed by `numrec` entries */
struct nvmf_disc_rsp_page_hdr {
	__le64		genctr;		/* generation counter; changes when log changes */
	__le64		numrec;		/* number of records in the log */
	__le16		recfmt;		/* record format, currently 0 */
	__u8		resv14[1006];
	struct nvmf_disc_rsp_page_entry entries[];
};
1196
/* Connect command cattr flags */
enum {
	NVME_CONNECT_DISABLE_SQFLOW	= (1 << 2),	/* disable SQ flow control */
};

/* Fabrics Connect command capsule */
struct nvmf_connect_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;	/* points to struct nvmf_connect_data */
	__le16		recfmt;		/* connect record format, currently 0 */
	__le16		qid;		/* 0 = admin queue, >0 = I/O queue */
	__le16		sqsize;		/* submission queue size, 0's based */
	__u8		cattr;		/* connect attributes (NVME_CONNECT_*) */
	__u8		resv3;
	__le32		kato;		/* keep-alive timeout in milliseconds */
	__u8		resv4[12];
};

/* Data transferred with the Connect command (1024 bytes) */
struct nvmf_connect_data {
	uuid_t		hostid;
	__le16		cntlid;		/* requested cntlid, or NVME_CNTLID_DYNAMIC */
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];
	char		hostnqn[NVMF_NQN_FIELD_LEN];
	char		resv5[256];
};
1225
/* Fabrics Property Set capsule: write a controller property (register) */
struct nvmf_property_set_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;		/* property size: 0 = 4 bytes, 1 = 8 bytes */
	__u8		resv3[3];
	__le32		offset;		/* property (register) offset */
	__le64		value;		/* value to write */
	__u8		resv4[8];
};

/* Fabrics Property Get capsule: read a controller property (register) */
struct nvmf_property_get_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;		/* property size: 0 = 4 bytes, 1 = 8 bytes */
	__u8		resv3[3];
	__le32		offset;		/* property (register) offset */
	__u8		resv4[16];
};
1250
/* Doorbell Buffer Config command (nvme_admin_dbbuf, emulated controllers) */
struct nvme_dbbuf {
	__u8		opcode;
	__u8		flags;
	__u16		command_id;
	__u32		rsvd1[5];
	__le64		prp1;		/* shadow doorbell buffer */
	__le64		prp2;		/* eventidx buffer */
	__u32		rsvd12[6];
};

/* Streams directive parameters returned by Directive Receive */
struct streams_directive_params {
	__le16	msl;		/* max streams limit */
	__le16	nssa;		/* NVM subsystem streams available */
	__le16	nsso;		/* NVM subsystem streams open */
	__u8	rsvd[10];
	__le32	sws;		/* stream write size */
	__le16	sgs;		/* stream granularity size */
	__le16	nsa;		/* namespace streams allocated */
	__le16	nso;		/* namespace streams open */
	__u8	rsvd2[6];
};
1272
/*
 * Generic 64-byte submission queue entry: one view per command type,
 * all sharing the common opcode/flags/command_id prefix.
 */
struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
		struct nvme_dbbuf dbbuf;
		struct nvme_directive_cmd directive;
	};
};
1296
Minwoo Im7a1f46e2019-06-06 14:30:14 +09001297static inline bool nvme_is_fabrics(struct nvme_command *cmd)
1298{
1299 return cmd->common.opcode == nvme_fabrics_command;
1300}
1301
/* Error Information log entry (NVME_LOG_ERROR), 64 bytes */
struct nvme_error_slot {
	__le64		error_count;		/* unique, incrementing error count */
	__le16		sqid;			/* submission queue the command came from */
	__le16		cmdid;			/* command identifier of the failed command */
	__le16		status_field;		/* completion status, incl. phase tag */
	__le16		param_error_location;
	__le64		lba;			/* first LBA that experienced the error */
	__le32		nsid;
	__u8		vs;			/* vendor specific info available */
	__u8		resv[3];
	__le64		cs;			/* command specific information */
	__u8		resv2[24];
};
1315
Christoph Hellwig7a5abb42016-06-06 23:20:49 +02001316static inline bool nvme_is_write(struct nvme_command *cmd)
1317{
Christoph Hellwigeb793e22016-06-13 16:45:25 +02001318 /*
1319 * What a mess...
1320 *
1321 * Why can't we simply have a Fabrics In and Fabrics out command?
1322 */
Minwoo Im7a1f46e2019-06-06 14:30:14 +09001323 if (unlikely(nvme_is_fabrics(cmd)))
Jon Derrick2fd41672017-07-12 10:58:19 -06001324 return cmd->fabrics.fctype & 1;
Christoph Hellwig7a5abb42016-06-06 23:20:49 +02001325 return cmd->common.opcode & 1;
1326}
1327
/*
 * Completion status codes.  The low byte is the status code proper, the
 * status code type lives in bits 10:8 (0x100 = command specific,
 * 0x200 = media/data integrity, 0x300 = path related).
 */
enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_SGL_INVALID_LAST	= 0xd,
	NVME_SC_SGL_INVALID_COUNT	= 0xe,
	NVME_SC_SGL_INVALID_DATA	= 0xf,
	NVME_SC_SGL_INVALID_METADATA	= 0x10,
	NVME_SC_SGL_INVALID_TYPE	= 0x11,

	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
	NVME_SC_SGL_INVALID_SUBTYPE	= 0x17,

	NVME_SC_SANITIZE_FAILED		= 0x1C,
	NVME_SC_SANITIZE_IN_PROGRESS	= 0x1D,

	NVME_SC_NS_WRITE_PROTECTED	= 0x20,
	NVME_SC_CMD_INTERRUPTED		= 0x21,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT	= 0x83,

	/*
	 * Command Specific Status:
	 */
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
	NVME_SC_FW_NEEDS_RESET		= 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
	NVME_SC_FW_ACTIVATE_PROHIBITED	= 0x113,
	NVME_SC_OVERLAPPING_RANGE	= 0x114,
	NVME_SC_NS_INSUFFICIENT_CAP	= 0x115,
	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
	NVME_SC_NS_IS_PRIVATE		= 0x119,
	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
	NVME_SC_CTRL_LIST_INVALID	= 0x11c,
	NVME_SC_BP_WRITE_PROHIBITED	= 0x11e,
	NVME_SC_PMR_SAN_PROHIBITED	= 0x123,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_INVALID_PI		= 0x181,
	NVME_SC_READ_ONLY		= 0x182,
	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 * (these overlap the NVM values above; interpretation depends on
	 * which command was issued)
	 */
	NVME_SC_CONNECT_FORMAT		= 0x180,
	NVME_SC_CONNECT_CTRL_BUSY	= 0x181,
	NVME_SC_CONNECT_INVALID_PARAM	= 0x182,
	NVME_SC_CONNECT_RESTART_DISC	= 0x183,
	NVME_SC_CONNECT_INVALID_HOST	= 0x184,

	NVME_SC_DISCOVERY_RESTART	= 0x190,
	NVME_SC_AUTH_REQUIRED		= 0x191,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	/*
	 * Path-related Errors:
	 */
	NVME_SC_ANA_PERSISTENT_LOSS	= 0x301,
	NVME_SC_ANA_INACCESSIBLE	= 0x302,
	NVME_SC_ANA_TRANSITION		= 0x303,
	NVME_SC_HOST_PATH_ERROR		= 0x370,
	NVME_SC_HOST_ABORTED_CMD	= 0x371,

	NVME_SC_CRD			= 0x1800,	/* command retry delay field mask */
	NVME_SC_DNR			= 0x4000,	/* do not retry bit */
};
1443
/* 16-byte completion queue entry */
struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16	u16;
		__le32	u32;
		__le64	u64;
	} result;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};
1458
/*
 * Compose a version number in the layout of the VS register:
 * major in bits 31:16, minor in 15:8, tertiary in 7:0.
 */
#define NVME_VS(major, minor, tertiary) \
	((tertiary) | ((minor) << 8) | ((major) << 16))

/* Pull the individual components back out of an NVME_VS()-style value */
#define NVME_MAJOR(ver)		((ver) >> 16)
#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver)	((ver) & 0xff)
1465
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001466#endif /* _LINUX_NVME_H */