/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <linux/types.h>
#include <linux/uuid.h>

/* NQN names in command fields are specified with one size */
#define NVMF_NQN_FIELD_LEN	256

/* However, the maximum length of a qualified name is another size */
#define NVMF_NQN_SIZE		223

#define NVMF_TRSVCID_SIZE	32
#define NVMF_TRADDR_SIZE	256
#define NVMF_TSAS_SIZE		256

#define NVME_DISC_SUBSYS_NAME	"nqn.2014-08.org.nvmexpress.discovery"

#define NVME_RDMA_IP_PORT	4420

#define NVME_NSID_ALL		0xffffffff

enum nvme_subsys_type {
	NVME_NQN_DISC	= 1,		/* Discovery type target subsystem */
	NVME_NQN_NVME	= 2,		/* NVME type target subsystem */
};

/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
	NVMF_ADDR_FAMILY_PCI	= 0,	/* PCIe */
	NVMF_ADDR_FAMILY_IP4	= 1,	/* IP4 */
	NVMF_ADDR_FAMILY_IP6	= 2,	/* IP6 */
	NVMF_ADDR_FAMILY_IB	= 3,	/* InfiniBand */
	NVMF_ADDR_FAMILY_FC	= 4,	/* Fibre Channel */
};

/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
	NVMF_TRTYPE_RDMA	= 1,	/* RDMA */
	NVMF_TRTYPE_FC		= 2,	/* Fibre Channel */
	NVMF_TRTYPE_LOOP	= 254,	/* Reserved for host usage */
	NVMF_TRTYPE_MAX,
};

/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
	NVMF_TREQ_NOT_SPECIFIED	= 0,	/* Not specified */
	NVMF_TREQ_REQUIRED	= 1,	/* Required */
	NVMF_TREQ_NOT_REQUIRED	= 2,	/* Not Required */
};

/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
 * RDMA_QPTYPE field
 */
enum {
	NVMF_RDMA_QPTYPE_CONNECTED	= 1, /* Reliable Connected */
	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
};

/* RDMA Provider Type codes for Discovery Log Page entry TSAS
 * RDMA_PRTYPE field
 */
enum {
	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
	NVMF_RDMA_PRTYPE_IB		= 2, /* InfiniBand */
	NVMF_RDMA_PRTYPE_ROCE		= 3, /* InfiniBand RoCE */
	NVMF_RDMA_PRTYPE_ROCEV2		= 4, /* InfiniBand RoCEV2 */
	NVMF_RDMA_PRTYPE_IWARP		= 5, /* IWARP */
};

/* RDMA Connection Management Service Type codes for Discovery Log Page
 * entry TSAS RDMA_CMS field
 */
enum {
	NVMF_RDMA_CMS_RDMA_CM	= 1, /* Sockets based endpoint addressing */
};

#define NVME_AQ_DEPTH		32
#define NVME_NR_AEN_COMMANDS	1
#define NVME_AQ_BLK_MQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)

/*
 * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
 * NVM-Express 1.2 specification, section 4.1.2.
 */
#define NVME_AQ_MQ_TAG_DEPTH	(NVME_AQ_BLK_MQ_DEPTH - 1)
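
/*
 * Worked example (editorial addition, not in the original header): with the
 * defaults above, one admin queue slot is reserved for the single internal
 * AEN command (NVME_AQ_BLK_MQ_DEPTH = 32 - 1 = 31 blk-mq tags), and one more
 * is kept empty for the 'Full Queue' rule (NVME_AQ_MQ_TAG_DEPTH = 30).
 */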

enum {
	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
	NVME_REG_VS	= 0x0008,	/* Version */
	NVME_REG_INTMS	= 0x000c,	/* Interrupt Mask Set */
	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
	NVME_REG_CC	= 0x0014,	/* Controller Configuration */
	NVME_REG_CSTS	= 0x001c,	/* Controller Status */
	NVME_REG_NSSR	= 0x0020,	/* NVM Subsystem Reset */
	NVME_REG_AQA	= 0x0024,	/* Admin Queue Attributes */
	NVME_REG_ASQ	= 0x0028,	/* Admin SQ Base Address */
	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
	NVME_REG_CMBLOC	= 0x0038,	/* Controller Memory Buffer Location */
	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
	NVME_REG_DBS	= 0x1000,	/* SQ 0 Tail Doorbell */
};

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)
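
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how a host driver typically decodes the raw 64-bit CAP register with the
 * accessors above.  MQES is zero's based, and MPSMIN/MPSMAX encode a page
 * size of 2^(12 + field) bytes.  The helper names here are hypothetical.
 */
static inline unsigned int nvme_example_max_queue_entries(u64 cap)
{
	/* MQES is zero's based, so the usable queue depth is MQES + 1. */
	return NVME_CAP_MQES(cap) + 1;
}

static inline unsigned long nvme_example_min_page_size(u64 cap)
{
	/* MPSMIN = 0 means 4KiB pages; each increment doubles the size. */
	return 1UL << (12 + NVME_CAP_MPSMIN(cap));
}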

#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)

enum {
	NVME_CMBSZ_SQS		= 1 << 0,
	NVME_CMBSZ_CQS		= 1 << 1,
	NVME_CMBSZ_LISTS	= 1 << 2,
	NVME_CMBSZ_RDS		= 1 << 3,
	NVME_CMBSZ_WDS		= 1 << 4,

	NVME_CMBSZ_SZ_SHIFT	= 12,
	NVME_CMBSZ_SZ_MASK	= 0xfffff,

	NVME_CMBSZ_SZU_SHIFT	= 8,
	NVME_CMBSZ_SZU_MASK	= 0xf,
};
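
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * computing the Controller Memory Buffer size in bytes from a raw CMBSZ
 * value.  SZU selects the size unit (0 = 4KiB, each step multiplies the unit
 * by 16) and SZ counts units; this mirrors how the PCIe host driver sizes its
 * mapping.  The helper name is hypothetical.
 */
static inline u64 nvme_example_cmb_size_bytes(u32 cmbsz)
{
	u64 szu = 1ULL << (12 + 4 * ((cmbsz >> NVME_CMBSZ_SZU_SHIFT) &
				     NVME_CMBSZ_SZU_MASK));

	return szu * ((cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK);
}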

/*
 * Submission and Completion Queue Entry Sizes for the NVM command set.
 * (In bytes and specified as a power of two (2^n)).
 */
#define NVME_NVM_IOSQES		6
#define NVME_NVM_IOCQES		4
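/* i.e. 2^6 = 64-byte submission queue entries and 2^4 = 16-byte completion queue entries. */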

enum {
	NVME_CC_ENABLE		= 1 << 0,
	NVME_CC_CSS_NVM		= 0 << 4,
	NVME_CC_EN_SHIFT	= 0,
	NVME_CC_CSS_SHIFT	= 4,
	NVME_CC_MPS_SHIFT	= 7,
	NVME_CC_AMS_SHIFT	= 11,
	NVME_CC_SHN_SHIFT	= 14,
	NVME_CC_IOSQES_SHIFT	= 16,
	NVME_CC_IOCQES_SHIFT	= 20,
	NVME_CC_AMS_RR		= 0 << NVME_CC_AMS_SHIFT,
	NVME_CC_AMS_WRRU	= 1 << NVME_CC_AMS_SHIFT,
	NVME_CC_AMS_VS		= 7 << NVME_CC_AMS_SHIFT,
	NVME_CC_SHN_NONE	= 0 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_NORMAL	= 1 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_ABRUPT	= 2 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_MASK	= 3 << NVME_CC_SHN_SHIFT,
	NVME_CC_IOSQES		= NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
	NVME_CC_IOCQES		= NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
	NVME_CSTS_RDY		= 1 << 0,
	NVME_CSTS_CFS		= 1 << 1,
	NVME_CSTS_NSSRO		= 1 << 4,
	NVME_CSTS_PP		= 1 << 5,
	NVME_CSTS_SHST_NORMAL	= 0 << 2,
	NVME_CSTS_SHST_OCCUR	= 1 << 2,
	NVME_CSTS_SHST_CMPLT	= 2 << 2,
	NVME_CSTS_SHST_MASK	= 3 << 2,
};
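
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * building a CC value to enable a controller, roughly as the core host driver
 * does.  'page_shift' is the host page shift (12 for 4KiB pages); the helper
 * name is hypothetical and the arbitration/shutdown choices shown are just
 * one sensible default.
 */
static inline u32 nvme_example_build_cc(unsigned int page_shift)
{
	u32 ctrl_config = NVME_CC_CSS_NVM;

	ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl_config |= NVME_CC_ENABLE;

	return ctrl_config;
}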

struct nvme_id_power_state {
	__le16			max_power;	/* centiwatts */
	__u8			rsvd2;
	__u8			flags;
	__le32			entry_lat;	/* microseconds */
	__le32			exit_lat;	/* microseconds */
	__u8			read_tput;
	__u8			read_lat;
	__u8			write_tput;
	__u8			write_lat;
	__le16			idle_power;
	__u8			idle_scale;
	__u8			rsvd19;
	__le16			active_power;
	__u8			active_work_scale;
	__u8			rsvd23[9];
};

enum {
	NVME_PS_FLAGS_MAX_POWER_SCALE	= 1 << 0,
	NVME_PS_FLAGS_NON_OP_STATE	= 1 << 1,
};

struct nvme_id_ctrl {
	__le16			vid;
	__le16			ssvid;
	char			sn[20];
	char			mn[40];
	char			fr[8];
	__u8			rab;
	__u8			ieee[3];
	__u8			cmic;
	__u8			mdts;
	__le16			cntlid;
	__le32			ver;
	__le32			rtd3r;
	__le32			rtd3e;
	__le32			oaes;
	__le32			ctratt;
	__u8			rsvd100[156];
	__le16			oacs;
	__u8			acl;
	__u8			aerl;
	__u8			frmw;
	__u8			lpa;
	__u8			elpe;
	__u8			npss;
	__u8			avscc;
	__u8			apsta;
	__le16			wctemp;
	__le16			cctemp;
	__le16			mtfa;
	__le32			hmpre;
	__le32			hmmin;
	__u8			tnvmcap[16];
	__u8			unvmcap[16];
	__le32			rpmbs;
	__le16			edstt;
	__u8			dsto;
	__u8			fwug;
	__le16			kas;
	__le16			hctma;
	__le16			mntmt;
	__le16			mxtmt;
	__le32			sanicap;
	__le32			hmminds;
	__le16			hmmaxd;
	__u8			rsvd338[174];
	__u8			sqes;
	__u8			cqes;
	__le16			maxcmd;
	__le32			nn;
	__le16			oncs;
	__le16			fuses;
	__u8			fna;
	__u8			vwc;
	__le16			awun;
	__le16			awupf;
	__u8			nvscc;
	__u8			rsvd531;
	__le16			acwu;
	__u8			rsvd534[2];
	__le32			sgls;
	__u8			rsvd540[228];
	char			subnqn[256];
	__u8			rsvd1024[768];
	__le32			ioccsz;
	__le32			iorcsz;
	__le16			icdoff;
	__u8			ctrattr;
	__u8			msdbd;
	__u8			rsvd1804[244];
	struct nvme_id_power_state	psd[32];
	__u8			vs[1024];
};

enum {
	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
	NVME_CTRL_ONCS_DSM			= 1 << 2,
	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
	NVME_CTRL_ONCS_TIMESTAMP		= 1 << 6,
	NVME_CTRL_VWC_PRESENT			= 1 << 0,
	NVME_CTRL_OACS_SEC_SUPP			= 1 << 0,
	NVME_CTRL_OACS_DIRECTIVES		= 1 << 5,
	NVME_CTRL_OACS_DBBUF_SUPP		= 1 << 8,
	NVME_CTRL_LPA_CMD_EFFECTS_LOG		= 1 << 1,
};

struct nvme_lbaf {
	__le16			ms;
	__u8			ds;
	__u8			rp;
};

struct nvme_id_ns {
	__le64			nsze;
	__le64			ncap;
	__le64			nuse;
	__u8			nsfeat;
	__u8			nlbaf;
	__u8			flbas;
	__u8			mc;
	__u8			dpc;
	__u8			dps;
	__u8			nmic;
	__u8			rescap;
	__u8			fpi;
	__u8			rsvd33;
	__le16			nawun;
	__le16			nawupf;
	__le16			nacwu;
	__le16			nabsn;
	__le16			nabo;
	__le16			nabspf;
	__le16			noiob;
	__u8			nvmcap[16];
	__u8			rsvd64[40];
	__u8			nguid[16];
	__u8			eui64[8];
	struct nvme_lbaf	lbaf[16];
	__u8			rsvd192[192];
	__u8			vs[3712];
};

enum {
	NVME_ID_CNS_NS			= 0x00,
	NVME_ID_CNS_CTRL		= 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
	NVME_ID_CNS_NS_DESC_LIST	= 0x03,
	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
	NVME_ID_CNS_NS_PRESENT		= 0x11,
	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
	NVME_ID_CNS_CTRL_LIST		= 0x13,
};

enum {
	NVME_DIR_IDENTIFY		= 0x00,
	NVME_DIR_STREAMS		= 0x01,
	NVME_DIR_SND_ID_OP_ENABLE	= 0x01,
	NVME_DIR_SND_ST_OP_REL_ID	= 0x01,
	NVME_DIR_SND_ST_OP_REL_RSC	= 0x02,
	NVME_DIR_RCV_ID_OP_PARAM	= 0x01,
	NVME_DIR_RCV_ST_OP_PARAM	= 0x01,
	NVME_DIR_RCV_ST_OP_STATUS	= 0x02,
	NVME_DIR_RCV_ST_OP_RESOURCE	= 0x03,
	NVME_DIR_ENDIR			= 0x01,
};

enum {
	NVME_NS_FEAT_THIN	= 1 << 0,
	NVME_NS_FLBAS_LBA_MASK	= 0xf,
	NVME_NS_FLBAS_META_EXT	= 0x10,
	NVME_LBAF_RP_BEST	= 0,
	NVME_LBAF_RP_BETTER	= 1,
	NVME_LBAF_RP_GOOD	= 2,
	NVME_LBAF_RP_DEGRADED	= 3,
	NVME_NS_DPC_PI_LAST	= 1 << 4,
	NVME_NS_DPC_PI_FIRST	= 1 << 3,
	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
	NVME_NS_DPS_PI_FIRST	= 1 << 3,
	NVME_NS_DPS_PI_MASK	= 0x7,
	NVME_NS_DPS_PI_TYPE1	= 1,
	NVME_NS_DPS_PI_TYPE2	= 2,
	NVME_NS_DPS_PI_TYPE3	= 3,
};

struct nvme_ns_id_desc {
	__u8 nidt;
	__u8 nidl;
	__le16 reserved;
};

#define NVME_NIDT_EUI64_LEN	8
#define NVME_NIDT_NGUID_LEN	16
#define NVME_NIDT_UUID_LEN	16

enum {
	NVME_NIDT_EUI64		= 0x01,
	NVME_NIDT_NGUID		= 0x02,
	NVME_NIDT_UUID		= 0x03,
};

struct nvme_smart_log {
	__u8			critical_warning;
	__u8			temperature[2];
	__u8			avail_spare;
	__u8			spare_thresh;
	__u8			percent_used;
	__u8			rsvd6[26];
	__u8			data_units_read[16];
	__u8			data_units_written[16];
	__u8			host_reads[16];
	__u8			host_writes[16];
	__u8			ctrl_busy_time[16];
	__u8			power_cycles[16];
	__u8			power_on_hours[16];
	__u8			unsafe_shutdowns[16];
	__u8			media_errors[16];
	__u8			num_err_log_entries[16];
	__le32			warning_temp_time;
	__le32			critical_comp_time;
	__le16			temp_sensor[8];
	__u8			rsvd216[296];
};

struct nvme_fw_slot_info_log {
	__u8			afi;
	__u8			rsvd1[7];
	__le64			frs[7];
	__u8			rsvd64[448];
};

enum {
	NVME_CMD_EFFECTS_CSUPP		= 1 << 0,
	NVME_CMD_EFFECTS_LBCC		= 1 << 1,
	NVME_CMD_EFFECTS_NCC		= 1 << 2,
	NVME_CMD_EFFECTS_NIC		= 1 << 3,
	NVME_CMD_EFFECTS_CCC		= 1 << 4,
	NVME_CMD_EFFECTS_CSE_MASK	= 3 << 16,
};

struct nvme_effects_log {
	__le32 acs[256];
	__le32 iocs[256];
	__u8   resv[2048];
};

enum {
	NVME_SMART_CRIT_SPARE		= 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
	NVME_SMART_CRIT_MEDIA		= 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
};

enum {
	NVME_AER_ERROR			= 0,
	NVME_AER_SMART			= 1,
	NVME_AER_NOTICE			= 2,
	NVME_AER_CSS			= 6,
	NVME_AER_VS			= 7,
};

enum {
	NVME_AER_NOTICE_NS_CHANGED	= 0x00,
	NVME_AER_NOTICE_FW_ACT_STARTING	= 0x01,
};

enum {
	NVME_AEN_CFG_NS_ATTR		= 1 << 8,
	NVME_AEN_CFG_FW_ACT		= 1 << 9,
};

struct nvme_lba_range_type {
	__u8			type;
	__u8			attributes;
	__u8			rsvd2[14];
	__u64			slba;
	__u64			nlb;
	__u8			guid[16];
	__u8			rsvd48[16];
};

enum {
	NVME_LBART_TYPE_FS	= 0x01,
	NVME_LBART_TYPE_RAID	= 0x02,
	NVME_LBART_TYPE_CACHE	= 0x03,
	NVME_LBART_TYPE_SWAP	= 0x04,

	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
};

struct nvme_reservation_status {
	__le32	gen;
	__u8	rtype;
	__u8	regctl[2];
	__u8	resv5[2];
	__u8	ptpls;
	__u8	resv10[13];
	struct {
		__le16	cntlid;
		__u8	rcsts;
		__u8	resv3[5];
		__le64	hostid;
		__le64	rkey;
	} regctl_ds[];
};

enum nvme_async_event_type {
	NVME_AER_TYPE_ERROR	= 0,
	NVME_AER_TYPE_SMART	= 1,
	NVME_AER_TYPE_NOTICE	= 2,
};

/* I/O commands */

enum nvme_opcode {
	nvme_cmd_flush		= 0x00,
	nvme_cmd_write		= 0x01,
	nvme_cmd_read		= 0x02,
	nvme_cmd_write_uncor	= 0x04,
	nvme_cmd_compare	= 0x05,
	nvme_cmd_write_zeroes	= 0x08,
	nvme_cmd_dsm		= 0x09,
	nvme_cmd_resv_register	= 0x0d,
	nvme_cmd_resv_report	= 0x0e,
	nvme_cmd_resv_acquire	= 0x11,
	nvme_cmd_resv_release	= 0x15,
};

/*
 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
 * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
 * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
 *                            request subtype
 */
enum {
	NVME_SGL_FMT_ADDRESS		= 0x00,
	NVME_SGL_FMT_OFFSET		= 0x01,
	NVME_SGL_FMT_TRANSPORT_A	= 0x0A,
	NVME_SGL_FMT_INVALIDATE		= 0x0f,
};

/*
 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * For struct nvme_sgl_desc:
 *   @NVME_SGL_FMT_DATA_DESC:		data block descriptor
 *   @NVME_SGL_FMT_SEG_DESC:		sgl segment descriptor
 *   @NVME_SGL_FMT_LAST_SEG_DESC:	last sgl segment descriptor
 *
 * For struct nvme_keyed_sgl_desc:
 *   @NVME_KEY_SGL_FMT_DATA_DESC:	keyed data block descriptor
 *
 * Transport-specific SGL types:
 *   @NVME_TRANSPORT_SGL_DATA_DESC:	Transport SGL data block descriptor
 */
enum {
	NVME_SGL_FMT_DATA_DESC		= 0x00,
	NVME_SGL_FMT_SEG_DESC		= 0x02,
	NVME_SGL_FMT_LAST_SEG_DESC	= 0x03,
	NVME_KEY_SGL_FMT_DATA_DESC	= 0x04,
	NVME_TRANSPORT_SGL_DATA_DESC	= 0x05,
};

struct nvme_sgl_desc {
	__le64	addr;
	__le32	length;
	__u8	rsvd[3];
	__u8	type;
};

struct nvme_keyed_sgl_desc {
	__le64	addr;
	__u8	length[3];
	__u8	key[4];
	__u8	type;
};

union nvme_data_ptr {
	struct {
		__le64	prp1;
		__le64	prp2;
	};
	struct nvme_sgl_desc	sgl;
	struct nvme_keyed_sgl_desc ksgl;
};

/*
 * Lowest two bits of our flags field (FUSE field in the spec):
 *
 * @NVME_CMD_FUSE_FIRST:   Fused Operation, first command
 * @NVME_CMD_FUSE_SECOND:  Fused Operation, second command
 *
 * Highest two bits in our flags field (PSDT field in the spec):
 *
 * @NVME_CMD_PSDT_SGL_METABUF:	Use SGLs for this transfer.
 *	If used, MPTR contains the address of a single physical buffer
 *	(byte aligned).
 * @NVME_CMD_PSDT_SGL_METASEG:	Use SGLs for this transfer.
 *	If used, MPTR contains the address of an SGL segment containing
 *	exactly 1 SGL descriptor (qword aligned).
 */
enum {
	NVME_CMD_FUSE_FIRST	= (1 << 0),
	NVME_CMD_FUSE_SECOND	= (1 << 1),

	NVME_CMD_SGL_METABUF	= (1 << 6),
	NVME_CMD_SGL_METASEG	= (1 << 7),
	NVME_CMD_SGL_ALL	= NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
};

struct nvme_common_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[2];
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le32			cdw10[6];
};

struct nvme_rw_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le64			slba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le32			reftag;
	__le16			apptag;
	__le16			appmask;
};

enum {
	NVME_RW_LR			= 1 << 15,
	NVME_RW_FUA			= 1 << 14,
	NVME_RW_DSM_FREQ_UNSPEC		= 0,
	NVME_RW_DSM_FREQ_TYPICAL	= 1,
	NVME_RW_DSM_FREQ_RARE		= 2,
	NVME_RW_DSM_FREQ_READS		= 3,
	NVME_RW_DSM_FREQ_WRITES		= 4,
	NVME_RW_DSM_FREQ_RW		= 5,
	NVME_RW_DSM_FREQ_ONCE		= 6,
	NVME_RW_DSM_FREQ_PREFETCH	= 7,
	NVME_RW_DSM_FREQ_TEMP		= 8,
	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
	NVME_RW_DSM_COMPRESSED		= 1 << 7,
	NVME_RW_PRINFO_PRCHK_REF	= 1 << 10,
	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
	NVME_RW_PRINFO_PRACT		= 1 << 13,
	NVME_RW_DTYPE_STREAMS		= 1 << 4,
};

struct nvme_dsm_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			nr;
	__le32			attributes;
	__u32			rsvd12[4];
};

enum {
	NVME_DSMGMT_IDR		= 1 << 0,
	NVME_DSMGMT_IDW		= 1 << 1,
	NVME_DSMGMT_AD		= 1 << 2,
};

#define NVME_DSM_MAX_RANGES	256

struct nvme_dsm_range {
	__le32			cattr;
	__le32			nlb;
	__le64			slba;
};
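
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * filling one deallocate range for a Dataset Management command, as a host
 * driver does when translating a discard request.  Assumes the usual kernel
 * endianness helpers (cpu_to_le32/cpu_to_le64) are available; the helper name
 * is hypothetical.  Up to NVME_DSM_MAX_RANGES such descriptors follow the
 * command, and the command's 'nr' field is zero's based.
 */
static inline void nvme_example_fill_dsm_range(struct nvme_dsm_range *range,
					       __u64 slba, __u32 nlb)
{
	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nlb);
	range->slba = cpu_to_le64(slba);
}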

struct nvme_write_zeroes_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le64			slba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le32			reftag;
	__le16			apptag;
	__le16			appmask;
};

/* Features */

struct nvme_feat_auto_pst {
	__le64 entries[32];
};

enum {
	NVME_HOST_MEM_ENABLE	= (1 << 0),
	NVME_HOST_MEM_RETURN	= (1 << 1),
};

/* Admin commands */

enum nvme_admin_opcode {
	nvme_admin_delete_sq		= 0x00,
	nvme_admin_create_sq		= 0x01,
	nvme_admin_get_log_page		= 0x02,
	nvme_admin_delete_cq		= 0x04,
	nvme_admin_create_cq		= 0x05,
	nvme_admin_identify		= 0x06,
	nvme_admin_abort_cmd		= 0x08,
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
	nvme_admin_ns_mgmt		= 0x0d,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
	nvme_admin_ns_attach		= 0x15,
	nvme_admin_keep_alive		= 0x18,
	nvme_admin_directive_send	= 0x19,
	nvme_admin_directive_recv	= 0x1a,
	nvme_admin_dbbuf		= 0x7C,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
	nvme_admin_security_recv	= 0x82,
	nvme_admin_sanitize_nvm		= 0x84,
};

enum {
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_TIMESTAMP	= 0x0e,
	NVME_FEAT_KATO		= 0x0f,
	NVME_FEAT_HCTM		= 0x10,
	NVME_FEAT_NOPSC		= 0x11,
	NVME_FEAT_RRL		= 0x12,
	NVME_FEAT_PLM_CONFIG	= 0x13,
	NVME_FEAT_PLM_WINDOW	= 0x14,
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
	NVME_FEAT_RESV_MASK	= 0x82,
	NVME_FEAT_RESV_PERSIST	= 0x83,
	NVME_LOG_ERROR		= 0x01,
	NVME_LOG_SMART		= 0x02,
	NVME_LOG_FW_SLOT	= 0x03,
	NVME_LOG_CHANGED_NS	= 0x04,
	NVME_LOG_CMD_EFFECTS	= 0x05,
	NVME_LOG_DISC		= 0x70,
	NVME_LOG_RESERVATION	= 0x80,
	NVME_FWACT_REPL		= (0 << 3),
	NVME_FWACT_REPL_ACTV	= (1 << 3),
	NVME_FWACT_ACTV		= (2 << 3),
};

#define NVME_MAX_CHANGED_NAMESPACES	1024

struct nvme_identify {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			cns;
	__u8			rsvd3;
	__le16			ctrlid;
	__u32			rsvd11[5];
};

#define NVME_IDENTIFY_DATA_SIZE 4096

struct nvme_features {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			fid;
	__le32			dword11;
	__le32			dword12;
	__le32			dword13;
	__le32			dword14;
	__le32			dword15;
};

struct nvme_host_mem_buf_desc {
	__le64			addr;
	__le32			size;
	__u32			rsvd;
};

struct nvme_create_cq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__u64			rsvd8;
	__le16			cqid;
	__le16			qsize;
	__le16			cq_flags;
	__le16			irq_vector;
	__u32			rsvd12[4];
};

struct nvme_create_sq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__u64			rsvd8;
	__le16			sqid;
	__le16			qsize;
	__le16			sq_flags;
	__le16			cqid;
	__u32			rsvd12[4];
};

struct nvme_delete_queue {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			qid;
	__u16			rsvd10;
	__u32			rsvd11[5];
};

struct nvme_abort_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			sqid;
	__u16			cid;
	__u32			rsvd11[5];
};

struct nvme_download_firmware {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	union nvme_data_ptr	dptr;
	__le32			numd;
	__le32			offset;
	__u32			rsvd12[4];
};

struct nvme_format_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[4];
	__le32			cdw10;
	__u32			rsvd11[5];
};

struct nvme_get_log_page_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			lid;
	__u8			rsvd10;
	__le16			numdl;
	__le16			numdu;
	__u16			rsvd11;
	__le32			lpol;
	__le32			lpou;
	__u32			rsvd14[2];
};

struct nvme_directive_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			numd;
	__u8			doper;
	__u8			dtype;
	__le16			dspec;
	__u8			endir;
	__u8			tdtype;
	__u16			rsvd15;

	__u32			rsvd16[3];
};

/*
 * Fabrics subcommands.
 */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command		= 0x7f,
};

enum nvmf_capsule_command {
	nvme_fabrics_type_property_set	= 0x00,
	nvme_fabrics_type_connect	= 0x01,
	nvme_fabrics_type_property_get	= 0x04,
};

struct nvmf_common_command {
	__u8	opcode;
	__u8	resv1;
	__u16	command_id;
	__u8	fctype;
	__u8	resv2[35];
	__u8	ts[24];
};

/*
 * The legal cntlid range an NVMe Target will provide.
 * Note that a cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff

#define MAX_DISC_LOGS	255

/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
	__u8		trtype;
	__u8		adrfam;
	__u8		subtype;
	__u8		treq;
	__le16		portid;
	__le16		cntlid;
	__le16		asqsz;
	__u8		resv8[22];
	char		trsvcid[NVMF_TRSVCID_SIZE];
	__u8		resv64[192];
	char		subnqn[NVMF_NQN_FIELD_LEN];
	char		traddr[NVMF_TRADDR_SIZE];
	union tsas {
		char		common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8	qptype;
			__u8	prtype;
			__u8	cms;
			__u8	resv3[5];
			__u16	pkey;
			__u8	resv10[246];
		} rdma;
	} tsas;
};

/* Discovery log page header */
struct nvmf_disc_rsp_page_hdr {
	__le64		genctr;
	__le64		numrec;
	__le16		recfmt;
	__u8		resv14[1006];
	struct nvmf_disc_rsp_page_entry entries[0];
};

struct nvmf_connect_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__le16		recfmt;
	__le16		qid;
	__le16		sqsize;
	__u8		cattr;
	__u8		resv3;
	__le32		kato;
	__u8		resv4[12];
};

struct nvmf_connect_data {
	uuid_t		hostid;
	__le16		cntlid;
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];
	char		hostnqn[NVMF_NQN_FIELD_LEN];
	char		resv5[256];
};

struct nvmf_property_set_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__le64		value;
	__u8		resv4[8];
};

struct nvmf_property_get_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__u8		resv4[16];
};

struct nvme_dbbuf {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd12[6];
};

struct streams_directive_params {
	__le16	msl;
	__le16	nssa;
	__le16	nsso;
	__u8	rsvd[10];
	__le32	sws;
	__le16	sgs;
	__le16	nsa;
	__le16	nso;
	__u8	rsvd2[6];
};

struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
		struct nvme_dbbuf dbbuf;
		struct nvme_directive_cmd directive;
	};
};
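
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how a host fills the command union above for an Identify Controller
 * request.  Assumes memset() (<linux/string.h>) is available; the helper name
 * is hypothetical and no data pointer setup is shown.
 */
static inline void nvme_example_init_identify_ctrl(struct nvme_command *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->identify.opcode = nvme_admin_identify;
	cmd->identify.cns = NVME_ID_CNS_CTRL;
}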

static inline bool nvme_is_write(struct nvme_command *cmd)
{
	/*
	 * What a mess...
	 *
	 * Why can't we simply have a Fabrics In and Fabrics out command?
	 */
	if (unlikely(cmd->common.opcode == nvme_fabrics_command))
		return cmd->fabrics.fctype & 1;
	return cmd->common.opcode & 1;
}

enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_SGL_INVALID_LAST	= 0xd,
	NVME_SC_SGL_INVALID_COUNT	= 0xe,
	NVME_SC_SGL_INVALID_DATA	= 0xf,
	NVME_SC_SGL_INVALID_METADATA	= 0x10,
	NVME_SC_SGL_INVALID_TYPE	= 0x11,

	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
	NVME_SC_SGL_INVALID_SUBTYPE	= 0x17,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT	= 0x83,

	/*
	 * Command Specific Status:
	 */
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
	NVME_SC_FW_NEEDS_RESET		= 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
	NVME_SC_FW_ACIVATE_PROHIBITED	= 0x113,
	NVME_SC_OVERLAPPING_RANGE	= 0x114,
	NVME_SC_NS_INSUFFICENT_CAP	= 0x115,
	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
	NVME_SC_NS_IS_PRIVATE		= 0x119,
	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
	NVME_SC_CTRL_LIST_INVALID	= 0x11c,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_INVALID_PI		= 0x181,
	NVME_SC_READ_ONLY		= 0x182,
	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 */
	NVME_SC_CONNECT_FORMAT		= 0x180,
	NVME_SC_CONNECT_CTRL_BUSY	= 0x181,
	NVME_SC_CONNECT_INVALID_PARAM	= 0x182,
	NVME_SC_CONNECT_RESTART_DISC	= 0x183,
	NVME_SC_CONNECT_INVALID_HOST	= 0x184,

	NVME_SC_DISCOVERY_RESTART	= 0x190,
	NVME_SC_AUTH_REQUIRED		= 0x191,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	NVME_SC_DNR			= 0x4000,
};

struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16	u16;
		__le32	u32;
		__le64	u64;
	} result;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};
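
/*
 * Illustrative note (editorial addition, not part of the original header):
 * bit 0 of the completion 'status' word is the phase tag, so host drivers
 * shift it out (le16_to_cpu(status) >> 1) before comparing against the
 * NVME_SC_* values above.  A hypothetical check on the already-shifted value:
 */
static inline bool nvme_example_status_is_dnr(__u16 status)
{
	return (status & NVME_SC_DNR) != 0;
}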

#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

#define NVME_MAJOR(ver)		((ver) >> 16)
#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver)	((ver) & 0xff)
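
/*
 * Worked example (editorial addition, not in the original header):
 * NVME_VS(1, 3, 0) == 0x00010300, which is what a 1.3 controller reports in
 * the VS register; NVME_MAJOR(0x00010300) == 1, NVME_MINOR(0x00010300) == 3,
 * and NVME_TERTIARY(0x00010300) == 0.
 */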

#endif /* _LINUX_NVME_H */