/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown. The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};
33
Christoph Hellwigf11bb3e2015-10-03 15:46:41 +020034extern unsigned char nvme_io_timeout;
35#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
36
Christoph Hellwig21d34712015-11-26 09:08:36 +010037extern unsigned char admin_timeout;
38#define ADMIN_TIMEOUT (admin_timeout * HZ)
39
Christoph Hellwig5fd4ce12015-11-28 15:03:49 +010040extern unsigned char shutdown_timeout;
41#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
42
Sagi Grimberg038bd4c2016-06-13 16:45:28 +020043#define NVME_DEFAULT_KATO 5
44#define NVME_KATO_GRACE 10
45
Keith Buschf80ec962016-07-12 16:20:31 -070046extern unsigned int nvme_max_retries;
47
Matias Bjørlingca064082015-10-29 17:57:29 +090048enum {
49 NVME_NS_LBA = 0,
50 NVME_NS_LIGHTNVM = 1,
51};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),
};
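
/*
 * Illustrative sketch, not part of the driver API: callers typically test
 * for a workaround by checking the corresponding bit in ctrl->quirks, e.g.
 *
 *	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
 *		// apply the vendor-specific stripe alignment
 */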

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
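
/*
 * Illustrative sketch (hypothetical "foo" transport, names are placeholders):
 * the per-request PDU embeds struct nvme_request first, so that nvme_req()
 * can recover it from the generic struct request via blk_mq_rq_to_pdu(), e.g.
 *
 *	struct nvme_foo_request {
 *		struct nvme_request	req;	// must come first
 *		...transport-specific fields...
 *	};
 */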

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000
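
/*
 * Illustrative sketch only: when the quirk is set, the controller-enable
 * path would typically apply the delay before polling CSTS.RDY, e.g.
 *
 *	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
 *		msleep(NVME_QUIRK_DELAY_AMOUNT);
 */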

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	struct opal_dev *opal_dev;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u16 oncs;
	u16 vid;
	u16 oacs;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	bool is_fabrics;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
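
/*
 * Illustrative sketch (hypothetical "foo" transport, names are placeholders):
 * each transport fills in an ops table and hands it to nvme_init_ctrl(), e.g.
 *
 *	static const struct nvme_ctrl_ops nvme_foo_ctrl_ops = {
 *		.name			= "foo",
 *		.module			= THIS_MODULE,
 *		.reg_read32		= nvme_foo_reg_read32,
 *		.reg_write32		= nvme_foo_reg_write32,
 *		.free_ctrl		= nvme_foo_free_ctrl,
 *	};
 */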

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
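	/* 0x4E564D65 == "NVMe": request an NVM subsystem reset */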
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

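/* Convert a 512-byte block layer sector number into a namespace LBA. */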
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

static inline unsigned nvme_map_len(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return sizeof(struct nvme_dsm_range);
	else
		return blk_rq_bytes(rq);
}

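/* Free the discard (DSM) payload attached by nvme_setup_cmd(), if any. */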
static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}

static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout &&
		req->retries < nvme_max_retries;
}

void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
					unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */