/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads of
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * Autonomous Power State Transitions (APST) should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),
};

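/*
 * Illustrative sketch, not part of the original header: transport drivers
 * consult these flags through ctrl->quirks (seeded via nvme_init_ctrl()),
 * for example
 *
 *	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
 *		msleep(NVME_QUIRK_DELAY_AMOUNT);
 */
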
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

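/*
 * Illustrative sketch with a hypothetical transport driver: nvme_req()
 * simply casts the blk-mq PDU, so the driver's per-request data must begin
 * with struct nvme_request, e.g.
 *
 *	struct my_transport_request {
 *		struct nvme_request	req;	(must be first)
 *		...				(transport-specific fields)
 *	};
 */
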
/*
 * Amount of delay (in ms) to apply before checking readiness for devices
 * such as PCI_DEVICE(0x1c58, 0x0003), which need the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled.  The value was found
 * empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	struct opal_dev *opal_dev;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u16 oncs;
	u16 vid;
	u16 oacs;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;

	/* Power saving configuration */
	u64 ps_max_latency_us;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	bool is_fabrics;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

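/*
 * Illustrative sketch (hypothetical names, not part of the original header):
 * each transport fills in an ops table and hands it to nvme_init_ctrl()
 * together with its quirks, e.g.
 *
 *	static const struct nvme_ctrl_ops my_transport_ctrl_ops = {
 *		.name			= "mytransport",
 *		.module			= THIS_MODULE,
 *		.reg_read32		= my_reg_read32,
 *		.reg_write32		= my_reg_write32,
 *		.free_ctrl		= my_free_ctrl,
 *		.submit_async_event	= my_submit_async_event,
 *	};
 *
 *	ret = nvme_init_ctrl(&my_ctrl->ctrl, dev, &my_transport_ctrl_ops, quirks);
 */
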
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* NVM Subsystem Reset: NSSR must be written with ASCII "NVMe" */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512-byte block layer sector number to the namespace's native
 * LBA number (ns->lba_shift is log2 of the formatted LBA size).
 */
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		/* free the payload (e.g. DSM range array) attached by nvme_setup_cmd() */
		kfree(page_address(req->special_vec.bv_page) +
			req->special_vec.bv_offset);
	}
}

static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		      void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}
static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */