/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED	= -EINTR,
};

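/*
 * Command timeouts.  The values come from module parameters expressed in
 * seconds; the macros below convert them to jiffies via HZ.
 */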
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)

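/*
 * Namespace personalities: NVME_NS_LBA is a regular block namespace, while
 * NVME_NS_LIGHTNVM marks a namespace that is driven through the LightNVM
 * (Open-Channel SSD) interface instead (see the nvme_nvm_* hooks below).
 */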
enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE		= (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS		= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES	= (1 << 2),
};

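/*
 * Controller states.  Transitions between them are presumably funnelled
 * through nvme_change_ctrl_state() (declared below), which can then reject
 * invalid transitions under ctrl->lock.
 */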
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	u16 vid;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	bool subsystem;
	unsigned long quirks;
	struct work_struct scan_work;
	struct work_struct async_event_work;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

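/*
 * Per-transport operations implemented by each host driver: the PCIe driver
 * on one hand and, judging by the is_fabrics flag and the get_subsysnqn/
 * get_address hooks, the NVMe over Fabrics transports on the other.
 */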
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	bool is_fabrics;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*post_scan)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

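/*
 * Read CSTS through the transport and test the RDY bit.  A failed register
 * read (e.g. a dead or surprise-removed device) is reported as "not ready".
 */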
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

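/*
 * 0x4E564D65 is ASCII "NVMe", the value the NVMe specification requires to
 * be written to the NSSR register to trigger an NVM subsystem reset.
 * Controllers that don't advertise subsystem reset support get -ENOTTY.
 */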
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

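/*
 * Convert a 512-byte block layer sector number into this namespace's LBA
 * units.  For example, with 4096-byte LBAs (lba_shift == 12) the sector
 * number is shifted right by 3.
 */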
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

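/*
 * Payload length the driver has to map for a request: a DISCARD is
 * translated into a single DSM range descriptor, while everything else
 * transfers the raw request data.
 */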
static inline unsigned nvme_map_len(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return sizeof(struct nvme_dsm_range);
	else
		return blk_rq_bytes(rq);
}

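/*
 * Undo per-command setup: for DISCARD this frees the DSM range buffer that
 * nvme_setup_cmd() presumably allocated and stashed in completion_data.
 */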
static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD)
		kfree(req->completion_data);
}

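/*
 * Map an NVMe completion status (code and type, masked to the low 11 bits)
 * onto a Linux errno for the block layer.
 */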
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

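/*
 * A failed command is retried unless the controller set the DNR (Do Not
 * Retry) bit, the block layer marked the request as non-retryable, or the
 * request's timeout has already expired.
 */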
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);

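/*
 * NVME_QID_ANY lets blk-mq pick any hardware queue for the allocated
 * request; a specific qid targets that queue, which the Fabrics code
 * presumably needs for per-queue Connect commands.
 */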
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */