/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
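
/* The timeouts above are module parameters in seconds, converted to jiffies. */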
40
Matias Bjørlingca064082015-10-29 17:57:29 +090041enum {
42 NVME_NS_LBA = 0,
43 NVME_NS_LIGHTNVM = 1,
44};
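
/*
 * nvme_ns.type holds one of the values above: a plain LBA namespace or a
 * LightNVM (Open-Channel SSD) namespace.
 */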

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),
};

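/*
 * State for one probed NVMe controller, shared between the core code and
 * the transport driver.
 */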
struct nvme_ctrl {
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	bool subsystem;
	unsigned long quirks;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	bool (*io_incapable)(struct nvme_ctrl *ctrl);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
};
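
/*
 * Transport drivers implement these callbacks and register them through
 * nvme_init_ctrl().  A minimal sketch for a hypothetical MMIO-based driver
 * (foo_dev/to_foo_dev are illustrative names, not an in-tree API):
 *
 *	static int foo_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 *	{
 *		*val = readl(to_foo_dev(ctrl)->bar + off);
 *		return 0;
 *	}
 *
 *	static const struct nvme_ctrl_ops foo_ctrl_ops = {
 *		.reg_read32	= foo_reg_read32,
 *		...
 *	};
 */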

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

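/*
 * I/O is impossible if the transport vetoes it (->io_incapable) or if the
 * controller reports a fatal status (CSTS.CFS).
 */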
static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->io_incapable(ctrl))
		return true;
	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return true;
	return val & NVME_CSTS_CFS;
}

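/*
 * Writing the ASCII string "NVMe" (0x4E564D65) to the NSSR register
 * requests an NVM subsystem reset, as defined by the NVMe specification.
 */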
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

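/*
 * Convert a 512-byte block layer sector number into this namespace's LBA
 * units, e.g. with 4096-byte LBAs (lba_shift == 12) the sector is shifted
 * right by 3.
 */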
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
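	/* The NVMe length field is a zero-based block count, hence the "- 1" */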
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

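/*
 * Translate an NVMe status code (the low 11 bits: status code plus status
 * code type) into a Linux errno.
 */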
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

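/*
 * A command is retried unless the controller set the Do Not Retry bit, the
 * block layer marked the request non-retryable, or its timeout has expired.
 */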
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
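
/*
 * Typical synchronous usage, a sketch modelled on nvme_identify_ctrl() in
 * core.c: build an nvme_command, then submit it on the admin queue and wait
 * for completion:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */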
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);

extern spinlock_t dev_list_lock;

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */