/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};
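
/*
 * Illustrative sketch, not part of the driver API: because the internal
 * code above is negative while NVMe hardware status codes are unsigned,
 * completion paths can separate the two with a plain sign test.  The
 * helper name is hypothetical.
 */
static inline bool nvme_example_status_is_internal(int status)
{
	/* negative: driver-internal (e.g. NVME_SC_CANCELLED); otherwise a
	 * status code reported by the controller */
	return status < 0;
}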

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)

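/*
 * Illustrative sketch, assuming the usual request-stamping pattern: the
 * timeouts above come from module parameters given in seconds, converted
 * to jiffies here, and are stamped onto a request before it is queued.
 * The helper name is hypothetical; real callers open-code this.
 */
static inline void nvme_example_stamp_timeout(struct request *req,
		bool is_admin)
{
	/* admin commands get ADMIN_TIMEOUT, normal I/O NVME_IO_TIMEOUT */
	req->timeout = is_admin ? ADMIN_TIMEOUT : NVME_IO_TIMEOUT;
}
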
enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),
};

struct nvme_ctrl {
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	bool subsystem;
	unsigned long quirks;
};
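
/*
 * Illustrative sketch: code that must honor one of the quirks defined
 * above simply tests the corresponding bit in ctrl->quirks.  The helper
 * name is hypothetical; the real checks are open-coded at their use
 * sites.
 */
static inline bool nvme_example_ctrl_has_quirk(struct nvme_ctrl *ctrl,
		enum nvme_quirks quirk)
{
	return ctrl->quirks & quirk;
}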

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	bool (*io_incapable)(struct nvme_ctrl *ctrl);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
};
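
/*
 * Illustrative sketch of a transport backing these ops (hypothetical;
 * the real PCIe implementation lives in pci.c): a memory-mapped
 * controller can service the register accessors with readl()/writel()
 * on its mapped BAR.  'struct nvme_example_dev' and its 'bar' member
 * are made up for this example.
 */
struct nvme_example_dev {
	struct nvme_ctrl ctrl;
	void __iomem *bar;	/* hypothetical ioremap() of BAR0 */
};

static inline int nvme_example_reg_read32(struct nvme_ctrl *ctrl, u32 off,
		u32 *val)
{
	struct nvme_example_dev *dev =
		container_of(ctrl, struct nvme_example_dev, ctrl);

	*val = readl(dev->bar + off);
	return 0;
}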

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}
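
/*
 * Illustrative sketch: bring-up code polls CSTS.RDY via the helper above
 * until the controller reports ready.  The real loop (nvme_wait_ready()
 * in core.c) sleeps between reads and bounds the wait using the timeout
 * advertised in the CAP register; the retry bound here is made up.
 */
static inline bool nvme_example_poll_ready(struct nvme_ctrl *ctrl)
{
	int retries = 1000;	/* arbitrary bound, for illustration only */

	while (!nvme_ctrl_ready(ctrl)) {
		if (--retries == 0)
			return false;
		cpu_relax();
	}
	return true;
}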

static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	/*
	 * If the transport says I/O is impossible, or CSTS cannot even be
	 * read, report the controller as incapable; only a readable CSTS
	 * with the fatal-status bit clear means I/O can proceed.
	 */
	if (ctrl->ops->io_incapable(ctrl))
		return true;
	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return true;
	return val & NVME_CSTS_CFS;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* writing the ASCII string "NVMe" to NSSR triggers the reset */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512-byte block layer sector number into the namespace's
 * native LBA, e.g. sector 8 becomes LBA 1 on a namespace formatted with
 * 4KiB logical blocks (lba_shift == 12).
 */
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	/* NVMe block counts are 0's based */
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}
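
/*
 * Illustrative sketch of how a transport's ->queue_rq() handler picks
 * between the helpers above (hypothetical helper; the real dispatcher in
 * pci.c additionally handles discard and pass-through commands).  Note
 * the 0's based rw.length set above: a 4KiB request on a 512-byte-LBA
 * namespace (lba_shift == 9) encodes as (4096 >> 9) - 1 = 7, i.e. eight
 * logical blocks.
 */
static inline void nvme_example_setup_cmd(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (req->cmd_flags & REQ_FLUSH)
		nvme_setup_flush(ns, cmnd);
	else
		nvme_setup_rw(ns, req, cmnd);
}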

static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	/* don't retry if the DNR (Do Not Retry) bit is set, the block layer
	 * asked for no retries, or the request's timeout already expired */
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
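
/*
 * Illustrative sketch of how the synchronous command helpers compose
 * (error handling trimmed; see nvme_identify_ctrl() in core.c for the
 * real version, which this approximates).  Assumes <linux/slab.h> is
 * reachable for kmalloc()/kfree().
 */
static inline int nvme_example_identify_ctrl(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* CNS value 1 selects the Identify Controller data structure */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}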

extern spinlock_t dev_list_lock;

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */