// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127
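/*
 * Rough sizing illustration (an assumption for clarity, not derived from
 * this file: it takes 4K pages and a 64-bit build where
 * sizeof(struct scatterlist) is 32 bytes): 127 segments need about 4064
 * bytes of scatterlist, just under one page, while 4096 KiB bounds the
 * largest transfer we will ever have to map.
 */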

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
	"Use SGLs when the average request segment size is greater than or "
	"equal to this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");

static int queue_count_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops queue_count_ops = {
	.set = queue_count_set,
	.get = param_get_int,
};

static int write_queues;
module_param_cb(write_queues, &queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static int poll_queues = 0;
module_param_cb(poll_queues, &queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static int queue_count_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret)
		return ret;
	if (n > num_possible_cpus())
		n = num_possible_cpus();

	return param_set_int(val, kp);
}

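/*
 * The shadow doorbell buffers hold interleaved SQ/CQ entries per queue,
 * scaled by the controller's doorbell stride: for example, with a stride of
 * 1, queue 1 uses slot 2 for its SQ doorbell and slot 3 for its CQ doorbell
 * (sq_idx(1, 1) == 2, cq_idx(1, 1) == 3).
 */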
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	struct nvme_command *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 last_cq_head;
	u16 qid;
	u8 cq_phase;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
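/*
 * Layout note (a sketch of how the driver sizes this structure): the iod is
 * embedded as the blk-mq per-request PDU, iod->sg points either at the
 * trailing inline_sg[] array or at an iod_mempool allocation, and the array
 * of PRP-list/SGL-segment page pointers returned by nvme_pci_iod_list()
 * lives immediately after the scatterlist entries in that same allocation.
 */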
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static unsigned int max_io_queues(void)
{
	return num_possible_cpus() + write_queues + poll_queues;
}

static unsigned int max_queue_count(void)
{
	/* IO queues + admin queue */
	return 1 + max_io_queues();
}

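/*
 * Each queue contributes one SQ and one CQ shadow doorbell entry (4 bytes
 * each, hence the factor of 8), spaced out by the controller's doorbell
 * stride.
 */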
static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return (max_queue_count() * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

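/*
 * Returns true if the (wrapping) event index lies in the half-open window
 * (old, new_idx]. For example, if the controller last published an event
 * index of 4 and the doorbell value moved from 3 to 5, the window covers it
 * and an MMIO doorbell write is required; with an event index of 1 it does
 * not, and the write can be skipped.
 */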
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
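/*
 * For instance (assuming a 4K controller page size), a 64 KiB transfer is
 * charged DIV_ROUND_UP(64K + 4K, 4K) = 17 PRP entries, i.e. 136 bytes of
 * PRP pointers, so a single PRP-list page suffices.
 */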
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

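/*
 * This value is used elsewhere in this file to size the blk-mq per-request
 * PDU (cmd_size) when the tag sets are created, so small requests (at most
 * NVME_INT_PAGES segments and NVME_INT_BYTES bytes) can be described by the
 * inline scatterlist in nvme_init_iod() and never touch the iod mempool.
 */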
static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
{
	unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
				    NVME_INT_BYTES(dev), NVME_INT_PAGES,
				    use_sgl);

	return sizeof(struct nvme_iod) + alloc_size;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;

	nvme_req(req)->ctrl = &dev->ctrl;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

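/*
 * Build the blk-mq queue maps: one map per hardware context type (default,
 * read and poll), each sized from dev->io_queues[]. IRQ-driven maps follow
 * the PCI MSI-X affinity, while the poll map has no interrupt and falls
 * back to the generic CPU-to-queue mapping.
 */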
static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 * @write_sq: whether to write to the SQ doorbell
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

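/*
 * Intended as the blk-mq ->commit_rqs() hook: flush any submission-queue
 * tail update that was deferred while a batch of requests was being queued
 * (bd->last == false in nvme_queue_rq()), so the controller sees the whole
 * batch with a single doorbell write.
 */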
static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

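/*
 * Decide between PRPs and SGLs for a request: the controller must support
 * SGLs for I/O commands, the request must not target the admin queue, and
 * the average segment size has to reach the sgl_threshold module parameter.
 */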
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = blk_rq_nr_phys_segments(rq);
	unsigned int size = blk_rq_payload_bytes(rq);

	iod->use_sgl = nvme_pci_use_sgls(dev, rq);

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
		if (!iod->sg)
			return BLK_STS_RESOURCE;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	iod->length = size;

	return BLK_STS_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
	int i;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	if (iod->sg != iod->inline_sg)
		mempool_free(iod->sg, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

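/*
 * Build the PRP entries for a request. PRP1 may start at any offset within
 * a controller page while every following entry must be page aligned; once
 * more than two entries are needed they are written into PRP-list pages,
 * and each full list page hands its last data pointer to the next page and
 * replaces it with a pointer to that next page (the
 * prp_list[0] = old_prp_list[i - 1] shuffle below).
 */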
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return BLK_STS_RESOURCE;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

	return BLK_STS_OK;

 bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

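/*
 * Build an SGL for the request. A single mapped segment is described inline
 * in the command; otherwise the command carries a segment descriptor that
 * points at a page of data descriptors, and each full page is extended by
 * rewriting its last slot as a (last-)segment descriptor for the next page.
 */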
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

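/*
 * Map the request's scatterlist for DMA (via the P2PDMA path when the pages
 * come from a peer PCI device's memory), then emit either PRPs or SGLs into
 * the command, and finally map the single metadata segment if the request
 * carries integrity data.
 */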
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 832 | static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, |
Christoph Hellwig | b131c61 | 2017-01-13 12:29:12 +0100 | [diff] [blame] | 833 | struct nvme_command *cmnd) |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 834 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 835 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 836 | struct request_queue *q = req->q; |
| 837 | enum dma_data_direction dma_dir = rq_data_dir(req) ? |
| 838 | DMA_TO_DEVICE : DMA_FROM_DEVICE; |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 839 | blk_status_t ret = BLK_STS_IOERR; |
Christoph Hellwig | b0f2853 | 2018-01-17 22:04:38 +0100 | [diff] [blame] | 840 | int nr_mapped; |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 841 | |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 842 | sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 843 | iod->nents = blk_rq_map_sg(q, req, iod->sg); |
| 844 | if (!iod->nents) |
| 845 | goto out; |
| 846 | |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 847 | ret = BLK_STS_RESOURCE; |
Logan Gunthorpe | e0596ab | 2018-10-04 15:27:44 -0600 | [diff] [blame] | 848 | |
| 849 | if (is_pci_p2pdma_page(sg_page(iod->sg))) |
| 850 | nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents, |
| 851 | dma_dir); |
| 852 | else |
| 853 | nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, |
| 854 | dma_dir, DMA_ATTR_NO_WARN); |
Christoph Hellwig | b0f2853 | 2018-01-17 22:04:38 +0100 | [diff] [blame] | 855 | if (!nr_mapped) |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 856 | goto out; |
| 857 | |
Minwoo Im | 955b1b5 | 2017-12-20 16:30:50 +0900 | [diff] [blame] | 858 | if (iod->use_sgl) |
Christoph Hellwig | b0f2853 | 2018-01-17 22:04:38 +0100 | [diff] [blame] | 859 | ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); |
Chaitanya Kulkarni | a7a7cbe | 2017-10-16 18:24:20 -0700 | [diff] [blame] | 860 | else |
| 861 | ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); |
| 862 | |
Keith Busch | 86eea28 | 2017-07-12 15:59:07 -0400 | [diff] [blame] | 863 | if (ret != BLK_STS_OK) |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 864 | goto out_unmap; |
| 865 | |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 866 | ret = BLK_STS_IOERR; |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 867 | if (blk_integrity_rq(req)) { |
| 868 | if (blk_rq_count_integrity_sg(q, req->bio) != 1) |
| 869 | goto out_unmap; |
| 870 | |
Christoph Hellwig | bf68405 | 2015-10-26 17:12:51 +0900 | [diff] [blame] | 871 | sg_init_table(&iod->meta_sg, 1); |
| 872 | if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1) |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 873 | goto out_unmap; |
| 874 | |
Christoph Hellwig | bf68405 | 2015-10-26 17:12:51 +0900 | [diff] [blame] | 875 | if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir)) |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 876 | goto out_unmap; |
Chaitanya Kulkarni | 3045c0d | 2018-10-17 11:34:15 -0700 | [diff] [blame] | 877 | |
| 878 | cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg)); |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 879 | } |
| 880 | |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 881 | return BLK_STS_OK; |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 882 | |
| 883 | out_unmap: |
| 884 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); |
| 885 | out: |
| 886 | return ret; |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 887 | } |
| 888 | |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 889 | static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) |
Christoph Hellwig | d4f6c3a | 2015-11-26 10:51:23 +0100 | [diff] [blame] | 890 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 891 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
Christoph Hellwig | d4f6c3a | 2015-11-26 10:51:23 +0100 | [diff] [blame] | 892 | enum dma_data_direction dma_dir = rq_data_dir(req) ? |
| 893 | DMA_TO_DEVICE : DMA_FROM_DEVICE; |
| 894 | |
| 895 | if (iod->nents) { |
Logan Gunthorpe | e0596ab | 2018-10-04 15:27:44 -0600 | [diff] [blame] | 896 | /* P2PDMA requests do not need to be unmapped */ |
| 897 | if (!is_pci_p2pdma_page(sg_page(iod->sg))) |
| 898 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); |
| 899 | |
Max Gurtovoy | f7f1fc3 | 2018-07-30 00:15:33 +0300 | [diff] [blame] | 900 | if (blk_integrity_rq(req)) |
Christoph Hellwig | bf68405 | 2015-10-26 17:12:51 +0900 | [diff] [blame] | 901 | dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir); |
Christoph Hellwig | d4f6c3a | 2015-11-26 10:51:23 +0100 | [diff] [blame] | 902 | } |
| 903 | |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 904 | nvme_cleanup_cmd(req); |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 905 | nvme_free_iod(dev, req); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 906 | } |
| 907 | |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 908 | /* |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 909 | * NOTE: ns is NULL when called on the admin queue. |
| 910 | */ |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 911 | static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 912 | const struct blk_mq_queue_data *bd) |
Keith Busch | 53562be | 2014-04-29 11:41:29 -0600 | [diff] [blame] | 913 | { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 914 | struct nvme_ns *ns = hctx->queue->queuedata; |
| 915 | struct nvme_queue *nvmeq = hctx->driver_data; |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 916 | struct nvme_dev *dev = nvmeq->dev; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 917 | struct request *req = bd->rq; |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 918 | struct nvme_command cmnd; |
Christoph Hellwig | ebe6d87 | 2017-06-12 18:36:32 +0200 | [diff] [blame] | 919 | blk_status_t ret; |
Keith Busch | e1e5e56 | 2015-02-19 13:39:03 -0700 | [diff] [blame] | 920 | |
Jens Axboe | d1f06f4 | 2018-05-17 18:31:49 +0200 | [diff] [blame] | 921 | /* |
| 922 | * We should not need to do this, but we're still using this to |
| 923 | * ensure we can drain requests on a dying queue. |
| 924 | */ |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 925 | if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) |
Jens Axboe | d1f06f4 | 2018-05-17 18:31:49 +0200 | [diff] [blame] | 926 | return BLK_STS_IOERR; |
| 927 | |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 928 | ret = nvme_setup_cmd(ns, req, &cmnd); |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 929 | if (ret) |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 930 | return ret; |
Keith Busch | edd10d3 | 2014-04-03 16:45:23 -0600 | [diff] [blame] | 931 | |
Christoph Hellwig | b131c61 | 2017-01-13 12:29:12 +0100 | [diff] [blame] | 932 | ret = nvme_init_iod(req, dev); |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 933 | if (ret) |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 934 | goto out_free_cmd; |
Keith Busch | edd10d3 | 2014-04-03 16:45:23 -0600 | [diff] [blame] | 935 | |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 936 | if (blk_rq_nr_phys_segments(req)) { |
Christoph Hellwig | b131c61 | 2017-01-13 12:29:12 +0100 | [diff] [blame] | 937 | ret = nvme_map_data(dev, req, &cmnd); |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 938 | if (ret) |
| 939 | goto out_cleanup_iod; |
| 940 | } |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 941 | |
Christoph Hellwig | aae239e | 2015-11-26 12:59:50 +0100 | [diff] [blame] | 942 | blk_mq_start_request(req); |
Jens Axboe | 04f3eaf | 2018-11-29 10:02:29 -0700 | [diff] [blame] | 943 | nvme_submit_cmd(nvmeq, &cmnd, bd->last); |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 944 | return BLK_STS_OK; |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 945 | out_cleanup_iod: |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 946 | nvme_free_iod(dev, req); |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 947 | out_free_cmd: |
| 948 | nvme_cleanup_cmd(req); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 949 | return ret; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 950 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 951 | |
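| | /* blk-mq completion callback: unmap the request's buffers, then run the core NVMe completion. */ |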
Christoph Hellwig | 77f02a7 | 2017-03-30 13:41:32 +0200 | [diff] [blame] | 952 | static void nvme_pci_complete_rq(struct request *req) |
Christoph Hellwig | eee417b | 2015-11-26 13:03:13 +0100 | [diff] [blame] | 953 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 954 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
Christoph Hellwig | eee417b | 2015-11-26 13:03:13 +0100 | [diff] [blame] | 955 | |
Christoph Hellwig | 77f02a7 | 2017-03-30 13:41:32 +0200 | [diff] [blame] | 956 | nvme_unmap_data(iod->nvmeq->dev, req); |
| 957 | nvme_complete_rq(req); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 958 | } |
| 959 | |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 960 | /* We read the CQE phase first to check if the rest of the entry is valid */ |
Christoph Hellwig | 750dde4 | 2018-05-18 08:37:04 -0600 | [diff] [blame] | 961 | static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 962 | { |
Christoph Hellwig | 750dde4 | 2018-05-18 08:37:04 -0600 | [diff] [blame] | 963 | return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) == |
| 964 | nvmeq->cq_phase; |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 965 | } |
| 966 | |
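| | /* Ring the CQ head doorbell unless the shadow doorbell buffer says the write can be skipped. */ |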
Sagi Grimberg | eb281c8 | 2017-06-18 17:28:07 +0300 | [diff] [blame] | 967 | static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 968 | { |
Sagi Grimberg | eb281c8 | 2017-06-18 17:28:07 +0300 | [diff] [blame] | 969 | u16 head = nvmeq->cq_head; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 970 | |
Keith Busch | 397c699 | 2018-06-06 08:13:05 -0600 | [diff] [blame] | 971 | if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, |
| 972 | nvmeq->dbbuf_cq_ei)) |
| 973 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); |
Sagi Grimberg | eb281c8 | 2017-06-18 17:28:07 +0300 | [diff] [blame] | 974 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 975 | |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 976 | static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 977 | { |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 978 | volatile struct nvme_completion *cqe = &nvmeq->cqes[idx]; |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 979 | struct request *req; |
| 980 | |
| 981 | if (unlikely(cqe->command_id >= nvmeq->q_depth)) { |
| 982 | dev_warn(nvmeq->dev->ctrl.device, |
| 983 | "invalid id %d completed on queue %d\n", |
| 984 | cqe->command_id, le16_to_cpu(cqe->sq_id)); |
| 985 | return; |
| 986 | } |
| 987 | |
| 988 | /* |
| 989 | * AEN requests are special as they don't time out and can |
| 990 | * survive any kind of queue freeze and often don't respond to |
| 991 | * aborts. We don't even bother to allocate a struct request |
| 992 | * for them but rather special case them here. |
| 993 | */ |
| 994 | if (unlikely(nvmeq->qid == 0 && |
Keith Busch | 38dabe2 | 2017-11-07 15:13:10 -0700 | [diff] [blame] | 995 | cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) { |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 996 | nvme_complete_async_event(&nvmeq->dev->ctrl, |
| 997 | cqe->status, &cqe->result); |
| 998 | return; |
| 999 | } |
| 1000 | |
| 1001 | req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); |
yupeng | 604c01d | 2018-12-18 17:59:53 +0100 | [diff] [blame] | 1002 | trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 1003 | nvme_end_request(req, cqe->status, cqe->result); |
| 1004 | } |
| 1005 | |
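| | /* Complete the CQ entries in the [start, end) window reaped by nvme_process_cq(). */ |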
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1006 | static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1007 | { |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1008 | while (start != end) { |
| 1009 | nvme_handle_cqe(nvmeq, start); |
| 1010 | if (++start == nvmeq->q_depth) |
| 1011 | start = 0; |
Sagi Grimberg | 920d13a | 2017-06-18 17:28:09 +0300 | [diff] [blame] | 1012 | } |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1013 | } |
| 1014 | |
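| | /* Advance the local CQ head, flipping the expected phase bit on wrap-around. */ |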
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1015 | static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1016 | { |
Hongbo Yao | dcca166 | 2019-01-07 10:22:07 +0800 | [diff] [blame] | 1017 | if (nvmeq->cq_head == nvmeq->q_depth - 1) { |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1018 | nvmeq->cq_head = 0; |
| 1019 | nvmeq->cq_phase = !nvmeq->cq_phase; |
Hongbo Yao | dcca166 | 2019-01-07 10:22:07 +0800 | [diff] [blame] | 1020 | } else { |
| 1021 | nvmeq->cq_head++; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1022 | } |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1023 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1024 | |
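| | /* |
| |  * Reap all pending CQ entries: record the window in *start/*end, ring the |
| |  * CQ doorbell, and return how many entries match @tag (-1 matches any). |
| |  * The caller completes the corresponding requests afterwards. |
| |  */ |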
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 1025 | static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start, |
| 1026 | u16 *end, unsigned int tag) |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1027 | { |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 1028 | int found = 0; |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1029 | |
| 1030 | *start = nvmeq->cq_head; |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 1031 | while (nvme_cqe_pending(nvmeq)) { |
| 1032 | if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag) |
| 1033 | found++; |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1034 | nvme_update_cq_head(nvmeq); |
| 1035 | } |
| 1036 | *end = nvmeq->cq_head; |
| 1037 | |
| 1038 | if (*start != *end) |
Sagi Grimberg | 920d13a | 2017-06-18 17:28:09 +0300 | [diff] [blame] | 1039 | nvme_ring_cq_doorbell(nvmeq); |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1040 | return found; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1041 | } |
| 1042 | |
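| | /* |
| |  * Interrupt handler.  The last_cq_head check reports IRQ_HANDLED even when |
| |  * the entries were already reaped elsewhere (e.g. by polling), so the core |
| |  * doesn't treat the interrupt as spurious. |
| |  */ |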
| 1043 | static irqreturn_t nvme_irq(int irq, void *data) |
| 1044 | { |
Matthew Wilcox | 58ffacb | 2011-02-06 07:28:06 -0500 | [diff] [blame] | 1045 | struct nvme_queue *nvmeq = data; |
Jens Axboe | 68fa9db | 2018-05-21 08:41:52 -0600 | [diff] [blame] | 1046 | irqreturn_t ret = IRQ_NONE; |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1047 | u16 start, end; |
| 1048 | |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1049 | /* |
| 1050 | * The rmb/wmb pair ensures we see all updates from a previous run of |
| 1051 | * the irq handler, even if that was on another CPU. |
| 1052 | */ |
| 1053 | rmb(); |
Jens Axboe | 68fa9db | 2018-05-21 08:41:52 -0600 | [diff] [blame] | 1054 | if (nvmeq->cq_head != nvmeq->last_cq_head) |
| 1055 | ret = IRQ_HANDLED; |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1056 | nvme_process_cq(nvmeq, &start, &end, -1); |
Jens Axboe | 68fa9db | 2018-05-21 08:41:52 -0600 | [diff] [blame] | 1057 | nvmeq->last_cq_head = nvmeq->cq_head; |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1058 | wmb(); |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1059 | |
Jens Axboe | 68fa9db | 2018-05-21 08:41:52 -0600 | [diff] [blame] | 1060 | if (start != end) { |
| 1061 | nvme_complete_cqes(nvmeq, start, end); |
| 1062 | return IRQ_HANDLED; |
| 1063 | } |
| 1064 | |
| 1065 | return ret; |
Matthew Wilcox | 58ffacb | 2011-02-06 07:28:06 -0500 | [diff] [blame] | 1066 | } |
| 1067 | |
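| | /* Hard-irq half of the threaded handler: only wake the thread if a CQE is pending. */ |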
| 1068 | static irqreturn_t nvme_irq_check(int irq, void *data) |
| 1069 | { |
| 1070 | struct nvme_queue *nvmeq = data; |
Christoph Hellwig | 750dde4 | 2018-05-18 08:37:04 -0600 | [diff] [blame] | 1071 | if (nvme_cqe_pending(nvmeq)) |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 1072 | return IRQ_WAKE_THREAD; |
| 1073 | return IRQ_NONE; |
Matthew Wilcox | 58ffacb | 2011-02-06 07:28:06 -0500 | [diff] [blame] | 1074 | } |
| 1075 | |
Christoph Hellwig | 0b2a8a9 | 2018-12-02 17:46:20 +0100 | [diff] [blame] | 1076 | /* |
| 1077 | * Poll for completions on any queue, including those not dedicated to polling. |
| 1078 | * Can be called from any context. |
| 1079 | */ |
| 1080 | static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag) |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1081 | { |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1082 | struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1083 | u16 start, end; |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 1084 | int found; |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1085 | |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1086 | /* |
| 1087 | * For a poll queue we need to protect against the polling thread |
| 1088 | * using the CQ lock. For normal interrupt driven queues we have |
| 1089 | * to disable the interrupt to avoid racing with the irq handler. |
| 1090 | */ |
Christoph Hellwig | 91a509f | 2018-12-13 09:48:00 +0100 | [diff] [blame] | 1091 | if (nvmeq->cq_vector == -1) { |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1092 | spin_lock(&nvmeq->cq_poll_lock); |
Christoph Hellwig | 91a509f | 2018-12-13 09:48:00 +0100 | [diff] [blame] | 1093 | found = nvme_process_cq(nvmeq, &start, &end, tag); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1094 | spin_unlock(&nvmeq->cq_poll_lock); |
Christoph Hellwig | 91a509f | 2018-12-13 09:48:00 +0100 | [diff] [blame] | 1095 | } else { |
| 1096 | disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); |
| 1097 | found = nvme_process_cq(nvmeq, &start, &end, tag); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1098 | enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); |
Christoph Hellwig | 91a509f | 2018-12-13 09:48:00 +0100 | [diff] [blame] | 1099 | } |
Sagi Grimberg | 442e19b | 2017-06-18 17:28:10 +0300 | [diff] [blame] | 1100 | |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1101 | nvme_complete_cqes(nvmeq, start, end); |
Sagi Grimberg | 442e19b | 2017-06-18 17:28:10 +0300 | [diff] [blame] | 1102 | return found; |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1103 | } |
| 1104 | |
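| | /* blk-mq ->poll callback for dedicated poll queues, serialized by cq_poll_lock. */ |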
Jens Axboe | 9743139 | 2018-11-16 09:48:21 -0700 | [diff] [blame] | 1105 | static int nvme_poll(struct blk_mq_hw_ctx *hctx) |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1106 | { |
| 1107 | struct nvme_queue *nvmeq = hctx->driver_data; |
Jens Axboe | dabcefa | 2018-11-14 09:38:28 -0700 | [diff] [blame] | 1108 | u16 start, end; |
| 1109 | bool found; |
| 1110 | |
| 1111 | if (!nvme_cqe_pending(nvmeq)) |
| 1112 | return 0; |
| 1113 | |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1114 | spin_lock(&nvmeq->cq_poll_lock); |
Jens Axboe | 9743139 | 2018-11-16 09:48:21 -0700 | [diff] [blame] | 1115 | found = nvme_process_cq(nvmeq, &start, &end, -1); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1116 | spin_unlock(&nvmeq->cq_poll_lock); |
Jens Axboe | dabcefa | 2018-11-14 09:38:28 -0700 | [diff] [blame] | 1117 | |
| 1118 | nvme_complete_cqes(nvmeq, start, end); |
| 1119 | return found; |
| 1120 | } |
| 1121 | |
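| | /* Submit an Asynchronous Event Request on the admin queue with the reserved AEN command id. */ |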
Keith Busch | ad22c35 | 2017-11-07 15:13:12 -0700 | [diff] [blame] | 1122 | static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1123 | { |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 1124 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1125 | struct nvme_queue *nvmeq = &dev->queues[0]; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1126 | struct nvme_command c; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1127 | |
| 1128 | memset(&c, 0, sizeof(c)); |
| 1129 | c.common.opcode = nvme_admin_async_event; |
Keith Busch | ad22c35 | 2017-11-07 15:13:12 -0700 | [diff] [blame] | 1130 | c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; |
Jens Axboe | 04f3eaf | 2018-11-29 10:02:29 -0700 | [diff] [blame] | 1131 | nvme_submit_cmd(nvmeq, &c, true); |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1132 | } |
| 1133 | |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1134 | static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) |
| 1135 | { |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1136 | struct nvme_command c; |
| 1137 | |
| 1138 | memset(&c, 0, sizeof(c)); |
| 1139 | c.delete_queue.opcode = opcode; |
| 1140 | c.delete_queue.qid = cpu_to_le16(id); |
| 1141 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1142 | return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1143 | } |
| 1144 | |
| 1145 | static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1146 | struct nvme_queue *nvmeq, s16 vector) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1147 | { |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1148 | struct nvme_command c; |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1149 | int flags = NVME_QUEUE_PHYS_CONTIG; |
| 1150 | |
| 1151 | if (vector != -1) |
| 1152 | flags |= NVME_CQ_IRQ_ENABLED; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1153 | |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1154 | /* |
Minwoo Im | 16772ae | 2017-10-18 22:56:09 +0900 | [diff] [blame] | 1155 | * Note: we (ab)use the fact that the prp fields survive if no data |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1156 | * is attached to the request. |
| 1157 | */ |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1158 | memset(&c, 0, sizeof(c)); |
| 1159 | c.create_cq.opcode = nvme_admin_create_cq; |
| 1160 | c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); |
| 1161 | c.create_cq.cqid = cpu_to_le16(qid); |
| 1162 | c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); |
| 1163 | c.create_cq.cq_flags = cpu_to_le16(flags); |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1164 | if (vector != -1) |
| 1165 | c.create_cq.irq_vector = cpu_to_le16(vector); |
| 1166 | else |
| 1167 | c.create_cq.irq_vector = 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1168 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1169 | return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1170 | } |
| 1171 | |
| 1172 | static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, |
| 1173 | struct nvme_queue *nvmeq) |
| 1174 | { |
Jens Axboe | 9abd68e | 2018-05-08 10:25:15 -0600 | [diff] [blame] | 1175 | struct nvme_ctrl *ctrl = &dev->ctrl; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1176 | struct nvme_command c; |
Keith Busch | 81c1cd9 | 2017-04-04 18:18:12 -0400 | [diff] [blame] | 1177 | int flags = NVME_QUEUE_PHYS_CONTIG; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1178 | |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1179 | /* |
Jens Axboe | 9abd68e | 2018-05-08 10:25:15 -0600 | [diff] [blame] | 1180 | * Some drives have a bug that auto-enables WRRU (weighted round robin |
| 1181 | * with an urgent priority class) if MEDIUM isn't set. Since the URGENT |
| 1182 | * priority class is encoded as zero, this makes all queues URGENT. |
| 1183 | */ |
| 1184 | if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) |
| 1185 | flags |= NVME_SQ_PRIO_MEDIUM; |
| 1186 | |
| 1187 | /* |
Minwoo Im | 16772ae | 2017-10-18 22:56:09 +0900 | [diff] [blame] | 1188 | * Note: we (ab)use the fact that the prp fields survive if no data |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1189 | * is attached to the request. |
| 1190 | */ |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1191 | memset(&c, 0, sizeof(c)); |
| 1192 | c.create_sq.opcode = nvme_admin_create_sq; |
| 1193 | c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); |
| 1194 | c.create_sq.sqid = cpu_to_le16(qid); |
| 1195 | c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); |
| 1196 | c.create_sq.sq_flags = cpu_to_le16(flags); |
| 1197 | c.create_sq.cqid = cpu_to_le16(qid); |
| 1198 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1199 | return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1200 | } |
| 1201 | |
| 1202 | static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) |
| 1203 | { |
| 1204 | return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); |
| 1205 | } |
| 1206 | |
| 1207 | static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) |
| 1208 | { |
| 1209 | return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); |
| 1210 | } |
| 1211 | |
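| | /* Completion of an abort command sent from the timeout handler: log the status and return the abort credit. */ |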
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1212 | static void abort_endio(struct request *req, blk_status_t error) |
Matthew Wilcox | bc5fc7e | 2011-09-19 17:08:14 -0400 | [diff] [blame] | 1213 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 1214 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
| 1215 | struct nvme_queue *nvmeq = iod->nvmeq; |
Matthew Wilcox | bc5fc7e | 2011-09-19 17:08:14 -0400 | [diff] [blame] | 1216 | |
Christoph Hellwig | 27fa9bc | 2017-04-20 16:02:57 +0200 | [diff] [blame] | 1217 | dev_warn(nvmeq->dev->ctrl.device, |
| 1218 | "Abort status: 0x%x", nvme_req(req)->status); |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1219 | atomic_inc(&nvmeq->dev->ctrl.abort_limit); |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1220 | blk_mq_free_request(req); |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1221 | } |
| 1222 | |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1223 | static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) |
| 1224 | { |
| 1225 | |
| 1226 | /* If true, indicates loss of adapter communication, possibly by a |
| 1227 | * NVMe Subsystem reset. |
| 1228 | */ |
| 1229 | bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); |
| 1230 | |
Jianchao Wang | ad70062 | 2018-01-22 22:03:16 +0800 | [diff] [blame] | 1231 | /* If there is a reset/reinit ongoing, we shouldn't reset again. */ |
| 1232 | switch (dev->ctrl.state) { |
| 1233 | case NVME_CTRL_RESETTING: |
Max Gurtovoy | ad6a0a5 | 2018-01-31 18:31:24 +0200 | [diff] [blame] | 1234 | case NVME_CTRL_CONNECTING: |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1235 | return false; |
Jianchao Wang | ad70062 | 2018-01-22 22:03:16 +0800 | [diff] [blame] | 1236 | default: |
| 1237 | break; |
| 1238 | } |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1239 | |
| 1240 | /* We shouldn't reset unless the controller is in a fatal error state |
| 1241 | * _or_ we have lost communication with it. |
| 1242 | */ |
| 1243 | if (!(csts & NVME_CSTS_CFS) && !nssro) |
| 1244 | return false; |
| 1245 | |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1246 | return true; |
| 1247 | } |
| 1248 | |
| 1249 | static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) |
| 1250 | { |
| 1251 | /* Read a config register to help see what died. */ |
| 1252 | u16 pci_status; |
| 1253 | int result; |
| 1254 | |
| 1255 | result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, |
| 1256 | &pci_status); |
| 1257 | if (result == PCIBIOS_SUCCESSFUL) |
| 1258 | dev_warn(dev->ctrl.device, |
| 1259 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", |
| 1260 | csts, pci_status); |
| 1261 | else |
| 1262 | dev_warn(dev->ctrl.device, |
| 1263 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", |
| 1264 | csts, result); |
| 1265 | } |
| 1266 | |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1267 | static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1268 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 1269 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
| 1270 | struct nvme_queue *nvmeq = iod->nvmeq; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1271 | struct nvme_dev *dev = nvmeq->dev; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1272 | struct request *abort_req; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1273 | struct nvme_command cmd; |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1274 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
| 1275 | |
Wen Xiong | 651438b | 2018-02-15 14:05:10 -0600 | [diff] [blame] | 1276 | /* If the PCI error recovery process is in progress, we cannot reset or |
| 1277 | * the recovery mechanism will surely fail. |
| 1278 | */ |
| 1279 | mb(); |
| 1280 | if (pci_channel_offline(to_pci_dev(dev->dev))) |
| 1281 | return BLK_EH_RESET_TIMER; |
| 1282 | |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1283 | /* |
| 1284 | * Reset immediately if the controller has failed. |
| 1285 | */ |
| 1286 | if (nvme_should_reset(dev, csts)) { |
| 1287 | nvme_warn_reset(dev, csts); |
| 1288 | nvme_dev_disable(dev, false); |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 1289 | nvme_reset_ctrl(&dev->ctrl); |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1290 | return BLK_EH_DONE; |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1291 | } |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1292 | |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1293 | /* |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1294 | * Did we miss an interrupt? |
| 1295 | */ |
Christoph Hellwig | 0b2a8a9 | 2018-12-02 17:46:20 +0100 | [diff] [blame] | 1296 | if (nvme_poll_irqdisable(nvmeq, req->tag)) { |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1297 | dev_warn(dev->ctrl.device, |
| 1298 | "I/O %d QID %d timeout, completion polled\n", |
| 1299 | req->tag, nvmeq->qid); |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1300 | return BLK_EH_DONE; |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1301 | } |
| 1302 | |
| 1303 | /* |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1304 | * Shut down immediately if the controller times out while starting. The |
| 1305 | * reset work will see the pci device disabled when it gets the forced |
| 1306 | * cancellation error. All outstanding requests are completed on |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1307 | * shutdown, so we return BLK_EH_DONE. |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1308 | */ |
Keith Busch | 4244140 | 2018-02-08 08:55:34 -0700 | [diff] [blame] | 1309 | switch (dev->ctrl.state) { |
| 1310 | case NVME_CTRL_CONNECTING: |
| 1311 | case NVME_CTRL_RESETTING: |
Keith Busch | b9cac43 | 2018-05-24 14:34:55 -0600 | [diff] [blame] | 1312 | dev_warn_ratelimited(dev->ctrl.device, |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1313 | "I/O %d QID %d timeout, disable controller\n", |
| 1314 | req->tag, nvmeq->qid); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 1315 | nvme_dev_disable(dev, false); |
Christoph Hellwig | 27fa9bc | 2017-04-20 16:02:57 +0200 | [diff] [blame] | 1316 | nvme_req(req)->flags |= NVME_REQ_CANCELLED; |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1317 | return BLK_EH_DONE; |
Keith Busch | 4244140 | 2018-02-08 08:55:34 -0700 | [diff] [blame] | 1318 | default: |
| 1319 | break; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1320 | } |
| 1321 | |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1322 | /* |
| 1323 | * Shut down the controller immediately and schedule a reset if the |
| 1324 | * command was already aborted once before and still hasn't been |
| 1325 | * returned to the driver, or if this is the admin queue. |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1326 | */ |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 1327 | if (!nvmeq->qid || iod->aborted) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1328 | dev_warn(dev->ctrl.device, |
Keith Busch | e1569a1 | 2015-11-26 12:11:07 +0100 | [diff] [blame] | 1329 | "I/O %d QID %d timeout, reset controller\n", |
| 1330 | req->tag, nvmeq->qid); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 1331 | nvme_dev_disable(dev, false); |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 1332 | nvme_reset_ctrl(&dev->ctrl); |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1333 | |
Christoph Hellwig | 27fa9bc | 2017-04-20 16:02:57 +0200 | [diff] [blame] | 1334 | nvme_req(req)->flags |= NVME_REQ_CANCELLED; |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1335 | return BLK_EH_DONE; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1336 | } |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1337 | |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1338 | if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { |
| 1339 | atomic_inc(&dev->ctrl.abort_limit); |
| 1340 | return BLK_EH_RESET_TIMER; |
| 1341 | } |
Keith Busch | 7bf7d77 | 2017-01-24 18:07:00 -0500 | [diff] [blame] | 1342 | iod->aborted = 1; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1343 | |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1344 | memset(&cmd, 0, sizeof(cmd)); |
| 1345 | cmd.abort.opcode = nvme_admin_abort_cmd; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1346 | cmd.abort.cid = req->tag; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1347 | cmd.abort.sqid = cpu_to_le16(nvmeq->qid); |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1348 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1349 | dev_warn(nvmeq->dev->ctrl.device, |
| 1350 | "I/O %d QID %d timeout, aborting\n", |
| 1351 | req->tag, nvmeq->qid); |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1352 | |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1353 | abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, |
Christoph Hellwig | eb71f43 | 2016-06-13 16:45:23 +0200 | [diff] [blame] | 1354 | BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); |
Christoph Hellwig | 6bf25d1 | 2015-11-20 09:36:44 +0100 | [diff] [blame] | 1355 | if (IS_ERR(abort_req)) { |
| 1356 | atomic_inc(&dev->ctrl.abort_limit); |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1357 | return BLK_EH_RESET_TIMER; |
Christoph Hellwig | 6bf25d1 | 2015-11-20 09:36:44 +0100 | [diff] [blame] | 1358 | } |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1359 | |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1360 | abort_req->timeout = ADMIN_TIMEOUT; |
| 1361 | abort_req->end_io_data = NULL; |
| 1362 | blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 1363 | |
Keith Busch | 7a509a6 | 2015-01-07 18:55:53 -0700 | [diff] [blame] | 1364 | /* |
| 1365 | * The aborted request will be completed once the abort command completes. |
| 1366 | * We re-enable the timer; if it expires again, it'll cause a device reset, |
| 1367 | * as the device is then in a faulty state. |
| 1368 | */ |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 1369 | return BLK_EH_RESET_TIMER; |
Matthew Wilcox | a09115b | 2012-08-07 15:56:23 -0400 | [diff] [blame] | 1370 | } |
| 1371 | |
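| | /* Free a queue's CQ and SQ memory; SQs placed in the CMB go back to the p2pmem pool. */ |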
Keith Busch | f435c28 | 2014-07-07 09:14:42 -0600 | [diff] [blame] | 1372 | static void nvme_free_queue(struct nvme_queue *nvmeq) |
Matthew Wilcox | 9e86677 | 2012-08-03 13:55:56 -0400 | [diff] [blame] | 1373 | { |
| 1374 | dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), |
| 1375 | (void *)nvmeq->cqes, nvmeq->cq_dma_addr); |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1376 | if (!nvmeq->sq_cmds) |
| 1377 | return; |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1378 | |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1379 | if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { |
| 1380 | pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev), |
| 1381 | nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth)); |
| 1382 | } else { |
| 1383 | dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), |
| 1384 | nvmeq->sq_cmds, nvmeq->sq_dma_addr); |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1385 | } |
Matthew Wilcox | 9e86677 | 2012-08-03 13:55:56 -0400 | [diff] [blame] | 1386 | } |
| 1387 | |
Keith Busch | a1a5ef9 | 2013-12-16 13:50:00 -0500 | [diff] [blame] | 1388 | static void nvme_free_queues(struct nvme_dev *dev, int lowest) |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1389 | { |
| 1390 | int i; |
| 1391 | |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1392 | for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1393 | dev->ctrl.queue_count--; |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1394 | nvme_free_queue(&dev->queues[i]); |
kaoudis | 121c7ad | 2015-01-14 21:01:58 -0700 | [diff] [blame] | 1395 | } |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1396 | } |
| 1397 | |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1398 | /** |
| 1399 | * nvme_suspend_queue - put queue into suspended state |
Bart Van Assche | 40581d1 | 2018-10-08 14:28:43 -0700 | [diff] [blame] | 1400 | * @nvmeq: queue to suspend |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1401 | */ |
| 1402 | static int nvme_suspend_queue(struct nvme_queue *nvmeq) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1403 | { |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1404 | if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) |
Keith Busch | 2b25d98 | 2014-12-22 12:59:04 -0700 | [diff] [blame] | 1405 | return 1; |
Matthew Wilcox | a09115b | 2012-08-07 15:56:23 -0400 | [diff] [blame] | 1406 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1407 | /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ |
Jens Axboe | d1f06f4 | 2018-05-17 18:31:49 +0200 | [diff] [blame] | 1408 | mb(); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1409 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1410 | nvmeq->dev->online_queues--; |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1411 | if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) |
Sagi Grimberg | c81545f | 2017-07-02 15:53:27 +0300 | [diff] [blame] | 1412 | blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1413 | if (nvmeq->cq_vector == -1) |
| 1414 | return 0; |
| 1415 | pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); |
| 1416 | nvmeq->cq_vector = -1; |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1417 | return 0; |
| 1418 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1419 | |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 1420 | static void nvme_suspend_io_queues(struct nvme_dev *dev) |
| 1421 | { |
| 1422 | int i; |
| 1423 | |
| 1424 | for (i = dev->ctrl.queue_count - 1; i > 0; i--) |
| 1425 | nvme_suspend_queue(&dev->queues[i]); |
| 1426 | } |
| 1427 | |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 1428 | static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1429 | { |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1430 | struct nvme_queue *nvmeq = &dev->queues[0]; |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1431 | |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 1432 | if (shutdown) |
| 1433 | nvme_shutdown_ctrl(&dev->ctrl); |
| 1434 | else |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 1435 | nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 1436 | |
Christoph Hellwig | 0b2a8a9 | 2018-12-02 17:46:20 +0100 | [diff] [blame] | 1437 | nvme_poll_irqdisable(nvmeq, -1); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1438 | } |
| 1439 | |
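| | /* Scale down the queue depth, if needed, so that nr_io_queues submission queues fit in the CMB. */ |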
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1440 | static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, |
| 1441 | int entry_size) |
| 1442 | { |
| 1443 | int q_depth = dev->q_depth; |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1444 | unsigned q_size_aligned = roundup(q_depth * entry_size, |
| 1445 | dev->ctrl.page_size); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1446 | |
| 1447 | if (q_size_aligned * nr_io_queues > dev->cmb_size) { |
Jon Derrick | c45f5c9 | 2015-07-21 15:08:13 -0600 | [diff] [blame] | 1448 | u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1449 | mem_per_q = round_down(mem_per_q, dev->ctrl.page_size); |
Jon Derrick | c45f5c9 | 2015-07-21 15:08:13 -0600 | [diff] [blame] | 1450 | q_depth = div_u64(mem_per_q, entry_size); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1451 | |
| 1452 | /* |
| 1453 | * Ensure the reduced q_depth is above some threshold where it |
| 1454 | * would be better to map queues in system memory with the |
| 1455 | * original depth. |
| 1456 | */ |
| 1457 | if (q_depth < 64) |
| 1458 | return -ENOMEM; |
| 1459 | } |
| 1460 | |
| 1461 | return q_depth; |
| 1462 | } |
| 1463 | |
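| | /* Allocate SQ command memory, preferring P2P memory in the CMB when the controller supports SQs there. */ |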
| 1464 | static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, |
| 1465 | int qid, int depth) |
| 1466 | { |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1467 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1468 | |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1469 | if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { |
| 1470 | nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth)); |
| 1471 | nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, |
| 1472 | nvmeq->sq_cmds); |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1473 | if (nvmeq->sq_dma_addr) { |
| 1474 | set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); |
| 1475 | return 0; |
| 1476 | } |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1477 | } |
| 1478 | |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1479 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), |
| 1480 | &nvmeq->sq_dma_addr, GFP_KERNEL); |
Keith Busch | 815c670 | 2018-02-13 05:44:44 -0700 | [diff] [blame] | 1481 | if (!nvmeq->sq_cmds) |
| 1482 | return -ENOMEM; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1483 | return 0; |
| 1484 | } |
| 1485 | |
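| | /* Allocate CQ/SQ memory and set up software state for queue @qid; no admin commands are issued here. */ |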
Keith Busch | a6ff726 | 2018-04-12 09:16:09 -0600 | [diff] [blame] | 1486 | static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1487 | { |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1488 | struct nvme_queue *nvmeq = &dev->queues[qid]; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1489 | |
Keith Busch | 62314e4 | 2018-01-23 09:16:19 -0700 | [diff] [blame] | 1490 | if (dev->ctrl.queue_count > qid) |
| 1491 | return 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1492 | |
Luis Chamberlain | 750afb0 | 2019-01-04 09:23:09 +0100 | [diff] [blame] | 1493 | nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth), |
| 1494 | &nvmeq->cq_dma_addr, GFP_KERNEL); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1495 | if (!nvmeq->cqes) |
| 1496 | goto free_nvmeq; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1497 | |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1498 | if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth)) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1499 | goto free_cqdma; |
| 1500 | |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1501 | nvmeq->q_dmadev = dev->dev; |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 1502 | nvmeq->dev = dev; |
Jens Axboe | 1ab0cd6 | 2018-05-17 18:31:51 +0200 | [diff] [blame] | 1503 | spin_lock_init(&nvmeq->sq_lock); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1504 | spin_lock_init(&nvmeq->cq_poll_lock); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1505 | nvmeq->cq_head = 0; |
Matthew Wilcox | 8212346 | 2011-01-20 13:24:06 -0500 | [diff] [blame] | 1506 | nvmeq->cq_phase = 1; |
Haiyan Hu | b80d5cc | 2013-09-10 11:25:37 +0800 | [diff] [blame] | 1507 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1508 | nvmeq->q_depth = depth; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1509 | nvmeq->qid = qid; |
Jon Derrick | 758dd7f | 2015-06-30 11:22:52 -0600 | [diff] [blame] | 1510 | nvmeq->cq_vector = -1; |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1511 | dev->ctrl.queue_count++; |
Jon Derrick | 36a7e99 | 2015-05-27 12:26:23 -0600 | [diff] [blame] | 1512 | |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1513 | return 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1514 | |
| 1515 | free_cqdma: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1516 | dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1517 | nvmeq->cq_dma_addr); |
| 1518 | free_nvmeq: |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1519 | return -ENOMEM; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1520 | } |
| 1521 | |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 1522 | static int queue_request_irq(struct nvme_queue *nvmeq) |
Matthew Wilcox | 3001082 | 2011-01-20 09:10:15 -0500 | [diff] [blame] | 1523 | { |
Christoph Hellwig | 0ff199c | 2017-04-13 09:06:43 +0200 | [diff] [blame] | 1524 | struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); |
| 1525 | int nr = nvmeq->dev->ctrl.instance; |
| 1526 | |
| 1527 | if (use_threaded_interrupts) { |
| 1528 | return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, |
| 1529 | nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); |
| 1530 | } else { |
| 1531 | return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, |
| 1532 | NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); |
| 1533 | } |
Matthew Wilcox | 3001082 | 2011-01-20 09:10:15 -0500 | [diff] [blame] | 1534 | } |
| 1535 | |
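| | /* Reset the software queue state (tails, head, phase, doorbell pointer) before (re)enabling the queue. */ |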
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1536 | static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1537 | { |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1538 | struct nvme_dev *dev = nvmeq->dev; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1539 | |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1540 | nvmeq->sq_tail = 0; |
Jens Axboe | 04f3eaf | 2018-11-29 10:02:29 -0700 | [diff] [blame] | 1541 | nvmeq->last_sq_tail = 0; |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1542 | nvmeq->cq_head = 0; |
| 1543 | nvmeq->cq_phase = 1; |
Haiyan Hu | b80d5cc | 2013-09-10 11:25:37 +0800 | [diff] [blame] | 1544 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1545 | memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); |
Helen Koike | f9f38e3 | 2017-04-10 12:51:07 -0300 | [diff] [blame] | 1546 | nvme_dbbuf_init(dev, nvmeq, qid); |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1547 | dev->online_queues++; |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1548 | wmb(); /* ensure the first interrupt sees the initialization */ |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1549 | } |
| 1550 | |
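| | /* Create an I/O queue pair on the controller and request its interrupt, unless it is a poll queue (vector == -1). */ |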
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1551 | static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1552 | { |
| 1553 | struct nvme_dev *dev = nvmeq->dev; |
| 1554 | int result; |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1555 | s16 vector; |
Matthew Wilcox | 3f85d50 | 2011-02-01 08:39:04 -0500 | [diff] [blame] | 1556 | |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 1557 | clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); |
| 1558 | |
Keith Busch | 22b5560 | 2018-04-12 09:16:10 -0600 | [diff] [blame] | 1559 | /* |
| 1560 | * A queue's vector matches the queue identifier unless the controller |
| 1561 | * has only one vector available. |
| 1562 | */ |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1563 | if (!polled) |
| 1564 | vector = dev->num_vecs == 1 ? 0 : qid; |
| 1565 | else |
| 1566 | vector = -1; |
| 1567 | |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1568 | result = adapter_alloc_cq(dev, qid, nvmeq, vector); |
Keith Busch | ded4550 | 2018-06-06 08:13:06 -0600 | [diff] [blame] | 1569 | if (result) |
| 1570 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1571 | |
| 1572 | result = adapter_alloc_sq(dev, qid, nvmeq); |
| 1573 | if (result < 0) |
Keith Busch | ded4550 | 2018-06-06 08:13:06 -0600 | [diff] [blame] | 1574 | return result; |
| 1575 | else if (result) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1576 | goto release_cq; |
| 1577 | |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1578 | nvmeq->cq_vector = vector; |
Keith Busch | 161b8be | 2017-09-14 13:54:39 -0400 | [diff] [blame] | 1579 | nvme_init_queue(nvmeq, qid); |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1580 | |
| 1581 | if (vector != -1) { |
| 1582 | result = queue_request_irq(nvmeq); |
| 1583 | if (result < 0) |
| 1584 | goto release_sq; |
| 1585 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1586 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1587 | set_bit(NVMEQ_ENABLED, &nvmeq->flags); |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1588 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1589 | |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1590 | release_sq: |
| 1591 | nvmeq->cq_vector = -1; |
Jianchao Wang | f25a2df | 2018-02-15 19:13:41 +0800 | [diff] [blame] | 1592 | dev->online_queues--; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1593 | adapter_delete_sq(dev, qid); |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1594 | release_cq: |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1595 | adapter_delete_cq(dev, qid); |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1596 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1597 | } |
| 1598 | |
Eric Biggers | f363b08 | 2017-03-30 13:39:16 -0700 | [diff] [blame] | 1599 | static const struct blk_mq_ops nvme_mq_admin_ops = { |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1600 | .queue_rq = nvme_queue_rq, |
Christoph Hellwig | 77f02a7 | 2017-03-30 13:41:32 +0200 | [diff] [blame] | 1601 | .complete = nvme_pci_complete_rq, |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1602 | .init_hctx = nvme_admin_init_hctx, |
Keith Busch | 4af0e21 | 2015-06-08 10:08:13 -0600 | [diff] [blame] | 1603 | .exit_hctx = nvme_admin_exit_hctx, |
Christoph Hellwig | 0350815 | 2017-06-13 09:15:18 +0200 | [diff] [blame] | 1604 | .init_request = nvme_init_request, |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1605 | .timeout = nvme_timeout, |
| 1606 | }; |
| 1607 | |
Eric Biggers | f363b08 | 2017-03-30 13:39:16 -0700 | [diff] [blame] | 1608 | static const struct blk_mq_ops nvme_mq_ops = { |
Christoph Hellwig | 376f7ef | 2018-12-02 17:46:27 +0100 | [diff] [blame] | 1609 | .queue_rq = nvme_queue_rq, |
| 1610 | .complete = nvme_pci_complete_rq, |
| 1611 | .commit_rqs = nvme_commit_rqs, |
| 1612 | .init_hctx = nvme_init_hctx, |
| 1613 | .init_request = nvme_init_request, |
| 1614 | .map_queues = nvme_pci_map_queues, |
| 1615 | .timeout = nvme_timeout, |
| 1616 | .poll = nvme_poll, |
Jens Axboe | dabcefa | 2018-11-14 09:38:28 -0700 | [diff] [blame] | 1617 | }; |
| 1618 | |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1619 | static void nvme_dev_remove_admin(struct nvme_dev *dev) |
| 1620 | { |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1621 | if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 1622 | /* |
| 1623 | * If the controller was reset during removal, it's possible |
| 1624 | * user requests may be waiting on a stopped queue. Start the |
| 1625 | * queue to flush these to completion. |
| 1626 | */ |
Sagi Grimberg | c81545f | 2017-07-02 15:53:27 +0300 | [diff] [blame] | 1627 | blk_mq_unquiesce_queue(dev->ctrl.admin_q); |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1628 | blk_cleanup_queue(dev->ctrl.admin_q); |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1629 | blk_mq_free_tag_set(&dev->admin_tagset); |
| 1630 | } |
| 1631 | } |
| 1632 | |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1633 | static int nvme_alloc_admin_tags(struct nvme_dev *dev) |
| 1634 | { |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1635 | if (!dev->ctrl.admin_q) { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1636 | dev->admin_tagset.ops = &nvme_mq_admin_ops; |
| 1637 | dev->admin_tagset.nr_hw_queues = 1; |
Keith Busch | e3e9d50 | 2016-01-04 09:10:55 -0700 | [diff] [blame] | 1638 | |
Keith Busch | 38dabe2 | 2017-11-07 15:13:10 -0700 | [diff] [blame] | 1639 | dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1640 | dev->admin_tagset.timeout = ADMIN_TIMEOUT; |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1641 | dev->admin_tagset.numa_node = dev_to_node(dev->dev); |
Chaitanya Kulkarni | a7a7cbe | 2017-10-16 18:24:20 -0700 | [diff] [blame] | 1642 | dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false); |
Jens Axboe | d348499 | 2017-01-13 14:43:58 -0700 | [diff] [blame] | 1643 | dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1644 | dev->admin_tagset.driver_data = dev; |
| 1645 | |
| 1646 | if (blk_mq_alloc_tag_set(&dev->admin_tagset)) |
| 1647 | return -ENOMEM; |
Sagi Grimberg | 34b6c23 | 2017-07-10 09:22:29 +0300 | [diff] [blame] | 1648 | dev->ctrl.admin_tagset = &dev->admin_tagset; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1649 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1650 | dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); |
| 1651 | if (IS_ERR(dev->ctrl.admin_q)) { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1652 | blk_mq_free_tag_set(&dev->admin_tagset); |
| 1653 | return -ENOMEM; |
| 1654 | } |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1655 | if (!blk_get_queue(dev->ctrl.admin_q)) { |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1656 | nvme_dev_remove_admin(dev); |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1657 | dev->ctrl.admin_q = NULL; |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1658 | return -ENODEV; |
| 1659 | } |
Keith Busch | 0fb59cb | 2015-01-07 18:55:50 -0700 | [diff] [blame] | 1660 | } else |
Sagi Grimberg | c81545f | 2017-07-02 15:53:27 +0300 | [diff] [blame] | 1661 | blk_mq_unquiesce_queue(dev->ctrl.admin_q); |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1662 | |
| 1663 | return 0; |
| 1664 | } |
| 1665 | |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 1666 | static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) |
| 1667 | { |
| 1668 | return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); |
| 1669 | } |
| 1670 | |
| 1671 | static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) |
| 1672 | { |
| 1673 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 1674 | |
| 1675 | if (size <= dev->bar_mapped_size) |
| 1676 | return 0; |
| 1677 | if (size > pci_resource_len(pdev, 0)) |
| 1678 | return -ENOMEM; |
| 1679 | if (dev->bar) |
| 1680 | iounmap(dev->bar); |
| 1681 | dev->bar = ioremap(pci_resource_start(pdev, 0), size); |
| 1682 | if (!dev->bar) { |
| 1683 | dev->bar_mapped_size = 0; |
| 1684 | return -ENOMEM; |
| 1685 | } |
| 1686 | dev->bar_mapped_size = size; |
| 1687 | dev->dbs = dev->bar + NVME_REG_DBS; |
| 1688 | |
| 1689 | return 0; |
| 1690 | } |
| 1691 | |
Sagi Grimberg | 01ad099 | 2017-05-01 00:27:17 +0300 | [diff] [blame] | 1692 | static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1693 | { |
Matthew Wilcox | ba47e38 | 2013-05-04 06:43:16 -0400 | [diff] [blame] | 1694 | int result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1695 | u32 aqa; |
| 1696 | struct nvme_queue *nvmeq; |
Keith Busch | 1d09062 | 2014-06-23 11:34:01 -0600 | [diff] [blame] | 1697 | |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 1698 | result = nvme_remap_bar(dev, db_bar_size(dev, 0)); |
| 1699 | if (result < 0) |
| 1700 | return result; |
| 1701 | |
Gabriel Krisman Bertazi | 8ef2074 | 2016-10-19 09:51:05 -0600 | [diff] [blame] | 1702 | dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 1703 | NVME_CAP_NSSRC(dev->ctrl.cap) : 0; |
Keith Busch | dfbac8c | 2015-08-10 15:20:40 -0600 | [diff] [blame] | 1704 | |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 1705 | if (dev->subsystem && |
| 1706 | (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) |
| 1707 | writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); |
Keith Busch | dfbac8c | 2015-08-10 15:20:40 -0600 | [diff] [blame] | 1708 | |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 1709 | result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); |
Matthew Wilcox | ba47e38 | 2013-05-04 06:43:16 -0400 | [diff] [blame] | 1710 | if (result < 0) |
| 1711 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1712 | |
Keith Busch | a6ff726 | 2018-04-12 09:16:09 -0600 | [diff] [blame] | 1713 | result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1714 | if (result) |
| 1715 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1716 | |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1717 | nvmeq = &dev->queues[0]; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1718 | aqa = nvmeq->q_depth - 1; |
| 1719 | aqa |= aqa << 16; |
| 1720 | |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 1721 | writel(aqa, dev->bar + NVME_REG_AQA); |
| 1722 | lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); |
| 1723 | lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); |
Keith Busch | 1d09062 | 2014-06-23 11:34:01 -0600 | [diff] [blame] | 1724 | |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 1725 | result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap); |
Keith Busch | 025c557 | 2013-05-01 13:07:51 -0600 | [diff] [blame] | 1726 | if (result) |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 1727 | return result; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1728 | |
Keith Busch | 2b25d98 | 2014-12-22 12:59:04 -0700 | [diff] [blame] | 1729 | nvmeq->cq_vector = 0; |
Keith Busch | 161b8be | 2017-09-14 13:54:39 -0400 | [diff] [blame] | 1730 | nvme_init_queue(nvmeq, 0); |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 1731 | result = queue_request_irq(nvmeq); |
Jon Derrick | 758dd7f | 2015-06-30 11:22:52 -0600 | [diff] [blame] | 1732 | if (result) { |
| 1733 | nvmeq->cq_vector = -1; |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 1734 | return result; |
Jon Derrick | 758dd7f | 2015-06-30 11:22:52 -0600 | [diff] [blame] | 1735 | } |
Keith Busch | 025c557 | 2013-05-01 13:07:51 -0600 | [diff] [blame] | 1736 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1737 | set_bit(NVMEQ_ENABLED, &nvmeq->flags); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1738 | return result; |
| 1739 | } |
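
The AQA register written above packs the admin completion queue size into bits 27:16 and the admin submission queue size into bits 11:0 of the NVMe register layout, both zero-based, which is why (q_depth - 1) is duplicated into both halves. A minimal sketch of the encoding, assuming the driver's usual NVME_AQ_DEPTH of 32:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t depth = 32;		/* assumed NVME_AQ_DEPTH */
	uint32_t aqa = depth - 1;	/* both fields are 0's based */

	aqa |= aqa << 16;
	printf("AQA = 0x%08x (ASQS = %u, ACQS = %u)\n",
	       aqa, aqa & 0xfff, (aqa >> 16) & 0xfff);
	return 0;
}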
| 1740 | |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1741 | static int nvme_create_io_queues(struct nvme_dev *dev) |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1742 | { |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1743 | unsigned i, max, rw_queues; |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1744 | int ret = 0; |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1745 | |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1746 | for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { |
Keith Busch | a6ff726 | 2018-04-12 09:16:09 -0600 | [diff] [blame] | 1747 | if (nvme_alloc_queue(dev, i, dev->q_depth)) { |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1748 | ret = -ENOMEM; |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1749 | break; |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1750 | } |
| 1751 | } |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1752 | |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1753 | max = min(dev->max_qid, dev->ctrl.queue_count - 1); |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 1754 | if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { |
| 1755 | rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + |
| 1756 | dev->io_queues[HCTX_TYPE_READ]; |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1757 | } else { |
| 1758 | rw_queues = max; |
| 1759 | } |
| 1760 | |
Keith Busch | 949928c | 2015-12-17 17:08:15 -0700 | [diff] [blame] | 1761 | for (i = dev->online_queues; i <= max; i++) { |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1762 | bool polled = i > rw_queues; |
| 1763 | |
| 1764 | ret = nvme_create_queue(&dev->queues[i], i, polled); |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 1765 | if (ret) |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1766 | break; |
Matthew Wilcox | 27e8166 | 2014-04-11 11:58:45 -0400 | [diff] [blame] | 1767 | } |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1768 | |
| 1769 | /* |
| 1770 | * Ignore failing Create SQ/CQ commands; we can continue with fewer
Minwoo Im | 8adb8c1 | 2018-01-14 16:14:27 +0900 | [diff] [blame] | 1771 | * than the desired number of queues, and even a controller without
| 1772 | * I/O queues can still be used to issue admin commands. This might |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1773 | * be useful to upgrade a buggy firmware for example. |
| 1774 | */ |
| 1775 | return ret >= 0 ? 0 : ret; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1776 | } |
| 1777 | |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 1778 | static ssize_t nvme_cmb_show(struct device *dev, |
| 1779 | struct device_attribute *attr, |
| 1780 | char *buf) |
| 1781 | { |
| 1782 | struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); |
| 1783 | |
Stephen Bates | c965809 | 2016-12-16 11:54:50 -0700 | [diff] [blame] | 1784 | return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 1785 | ndev->cmbloc, ndev->cmbsz); |
| 1786 | } |
| 1787 | static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); |
| 1788 | |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1789 | static u64 nvme_cmb_size_unit(struct nvme_dev *dev) |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1790 | { |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1791 | u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; |
| 1792 | |
| 1793 | return 1ULL << (12 + 4 * szu); |
| 1794 | } |
| 1795 | |
| 1796 | static u32 nvme_cmb_size(struct nvme_dev *dev) |
| 1797 | { |
| 1798 | return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; |
| 1799 | } |
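
CMBSZ.SZU selects the size granularity (0 means 4 KiB, and each increment multiplies it by 16, hence 1 << (12 + 4 * szu)), while CMBSZ.SZ gives the CMB size in those units. A small sketch of the decode above, using a made-up register value rather than one read from a device:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* SZ = 256 (bits 31:12), SZU = 1 (bits 11:8), i.e. 64 KiB units */
	uint32_t cmbsz = (256u << 12) | (1u << 8);
	uint64_t unit = 1ULL << (12 + 4 * ((cmbsz >> 8) & 0xf));
	uint64_t size = (uint64_t)((cmbsz >> 12) & 0xfffff) * unit;

	printf("CMB size: %llu MiB\n", (unsigned long long)(size >> 20));
	return 0;
}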
| 1800 | |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1801 | static void nvme_map_cmb(struct nvme_dev *dev) |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1802 | { |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1803 | u64 size, offset; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1804 | resource_size_t bar_size; |
| 1805 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Christoph Hellwig | 8969f1f | 2017-10-01 09:37:35 +0200 | [diff] [blame] | 1806 | int bar; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1807 | |
Keith Busch | 9fe5c59 | 2018-10-31 13:15:29 -0600 | [diff] [blame] | 1808 | if (dev->cmb_size) |
| 1809 | return; |
| 1810 | |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 1811 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1812 | if (!dev->cmbsz) |
| 1813 | return; |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 1814 | dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1815 | |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1816 | size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); |
| 1817 | offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); |
Christoph Hellwig | 8969f1f | 2017-10-01 09:37:35 +0200 | [diff] [blame] | 1818 | bar = NVME_CMB_BIR(dev->cmbloc); |
| 1819 | bar_size = pci_resource_len(pdev, bar); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1820 | |
| 1821 | if (offset > bar_size) |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1822 | return; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1823 | |
| 1824 | /* |
| 1825 | * Controllers may support a CMB size larger than their BAR, |
| 1826 | * for example, due to being behind a bridge. Reduce the CMB to |
| 1827 | * the reported size of the BAR.
| 1828 | */ |
| 1829 | if (size > bar_size - offset) |
| 1830 | size = bar_size - offset; |
| 1831 | |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1832 | if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { |
| 1833 | dev_warn(dev->ctrl.device, |
| 1834 | "failed to register the CMB\n"); |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1835 | return; |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1836 | } |
| 1837 | |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1838 | dev->cmb_size = size; |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1839 | dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); |
| 1840 | |
| 1841 | if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == |
| 1842 | (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) |
| 1843 | pci_p2pmem_publish(pdev, true); |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1844 | |
| 1845 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, |
| 1846 | &dev_attr_cmb.attr, NULL)) |
| 1847 | dev_warn(dev->ctrl.device, |
| 1848 | "failed to add sysfs attribute for CMB\n"); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1849 | } |
| 1850 | |
| 1851 | static inline void nvme_release_cmb(struct nvme_dev *dev) |
| 1852 | { |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1853 | if (dev->cmb_size) { |
Max Gurtovoy | 1c78f77 | 2017-07-30 01:45:08 +0300 | [diff] [blame] | 1854 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, |
| 1855 | &dev_attr_cmb.attr, NULL); |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1856 | dev->cmb_size = 0; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1857 | } |
| 1858 | } |
| 1859 | |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1860 | static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 1861 | { |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1862 | u64 dma_addr = dev->host_mem_descs_dma; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1863 | struct nvme_command c; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1864 | int ret; |
| 1865 | |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1866 | memset(&c, 0, sizeof(c)); |
| 1867 | c.features.opcode = nvme_admin_set_features; |
| 1868 | c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); |
| 1869 | c.features.dword11 = cpu_to_le32(bits); |
| 1870 | c.features.dword12 = cpu_to_le32(dev->host_mem_size >> |
| 1871 | ilog2(dev->ctrl.page_size)); |
| 1872 | c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); |
| 1873 | c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); |
| 1874 | c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); |
| 1875 | |
| 1876 | ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
| 1877 | if (ret) { |
| 1878 | dev_warn(dev->ctrl.device, |
| 1879 | "failed to set host mem (err %d, flags %#x).\n", |
| 1880 | ret, bits); |
| 1881 | } |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1882 | return ret; |
| 1883 | } |
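
The host memory buffer is handed to the controller with a single Set Features command (FID NVME_FEAT_HOST_MEM_BUF, 0x0d): dword11 carries the enable/return bits, dword12 the buffer size in controller pages, dword13/14 the low and high halves of the descriptor-list DMA address, and dword15 the descriptor count. A sketch of those dwords with illustrative values (the DMA address, buffer size, and descriptor count below are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t host_mem_size = 32ull << 20;	/* 32 MiB buffer (example) */
	unsigned int page_shift = 12;		/* ilog2(ctrl.page_size) */
	uint64_t descs_dma = 0x12345000ull;	/* made-up descriptor list address */

	uint32_t dw11 = 0x1;				/* NVME_HOST_MEM_ENABLE */
	uint32_t dw12 = host_mem_size >> page_shift;	/* size in controller pages */
	uint32_t dw13 = (uint32_t)descs_dma;		/* descriptor list, low 32 bits */
	uint32_t dw14 = (uint32_t)(descs_dma >> 32);	/* descriptor list, high 32 bits */
	uint32_t dw15 = 8;				/* number of descriptors */

	printf("dw11=%#x dw12=%u dw13=%#x dw14=%#x dw15=%u\n",
	       dw11, dw12, dw13, dw14, dw15);
	return 0;
}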
| 1884 | |
| 1885 | static void nvme_free_host_mem(struct nvme_dev *dev) |
| 1886 | { |
| 1887 | int i; |
| 1888 | |
| 1889 | for (i = 0; i < dev->nr_host_mem_descs; i++) { |
| 1890 | struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; |
| 1891 | size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; |
| 1892 | |
Liviu Dudau | cc667f6 | 2018-12-29 17:23:43 +0000 | [diff] [blame] | 1893 | dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], |
| 1894 | le64_to_cpu(desc->addr), |
| 1895 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1896 | } |
| 1897 | |
| 1898 | kfree(dev->host_mem_desc_bufs); |
| 1899 | dev->host_mem_desc_bufs = NULL; |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1900 | dma_free_coherent(dev->dev, |
| 1901 | dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), |
| 1902 | dev->host_mem_descs, dev->host_mem_descs_dma); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1903 | dev->host_mem_descs = NULL; |
Minwoo Im | 7e5dd57 | 2017-11-25 03:03:00 +0900 | [diff] [blame] | 1904 | dev->nr_host_mem_descs = 0; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1905 | } |
| 1906 | |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1907 | static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, |
| 1908 | u32 chunk_size) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1909 | { |
| 1910 | struct nvme_host_mem_buf_desc *descs; |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1911 | u32 max_entries, len; |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1912 | dma_addr_t descs_dma; |
Dan Carpenter | 2ee0e4e | 2017-07-06 12:26:52 +0300 | [diff] [blame] | 1913 | int i = 0; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1914 | void **bufs; |
Minwoo Im | 6fbcde6 | 2017-12-05 05:23:54 +0900 | [diff] [blame] | 1915 | u64 size, tmp; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1916 | |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1917 | tmp = (preferred + chunk_size - 1); |
| 1918 | do_div(tmp, chunk_size); |
| 1919 | max_entries = tmp; |
Christoph Hellwig | 044a9df | 2017-09-11 12:09:28 -0400 | [diff] [blame] | 1920 | |
| 1921 | if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) |
| 1922 | max_entries = dev->ctrl.hmmaxd; |
| 1923 | |
Luis Chamberlain | 750afb0 | 2019-01-04 09:23:09 +0100 | [diff] [blame] | 1924 | descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), |
| 1925 | &descs_dma, GFP_KERNEL); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1926 | if (!descs) |
| 1927 | goto out; |
| 1928 | |
| 1929 | bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); |
| 1930 | if (!bufs) |
| 1931 | goto out_free_descs; |
| 1932 | |
Minwoo Im | 244a8fe | 2017-11-17 01:34:24 +0900 | [diff] [blame] | 1933 | for (size = 0; size < preferred && i < max_entries; size += len) { |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1934 | dma_addr_t dma_addr; |
| 1935 | |
Christoph Hellwig | 50cdb7c | 2017-07-25 17:39:07 +0200 | [diff] [blame] | 1936 | len = min_t(u64, chunk_size, preferred - size); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1937 | bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, |
| 1938 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
| 1939 | if (!bufs[i]) |
| 1940 | break; |
| 1941 | |
| 1942 | descs[i].addr = cpu_to_le64(dma_addr); |
| 1943 | descs[i].size = cpu_to_le32(len / dev->ctrl.page_size); |
| 1944 | i++; |
| 1945 | } |
| 1946 | |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1947 | if (!size) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1948 | goto out_free_bufs; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1949 | |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1950 | dev->nr_host_mem_descs = i; |
| 1951 | dev->host_mem_size = size; |
| 1952 | dev->host_mem_descs = descs; |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1953 | dev->host_mem_descs_dma = descs_dma; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1954 | dev->host_mem_desc_bufs = bufs; |
| 1955 | return 0; |
| 1956 | |
| 1957 | out_free_bufs: |
| 1958 | while (--i >= 0) { |
| 1959 | size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; |
| 1960 | |
Liviu Dudau | cc667f6 | 2018-12-29 17:23:43 +0000 | [diff] [blame] | 1961 | dma_free_attrs(dev->dev, size, bufs[i], |
| 1962 | le64_to_cpu(descs[i].addr), |
| 1963 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1964 | } |
| 1965 | |
| 1966 | kfree(bufs); |
| 1967 | out_free_descs: |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1968 | dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, |
| 1969 | descs_dma); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1970 | out: |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1971 | dev->host_mem_descs = NULL; |
| 1972 | return -ENOMEM; |
| 1973 | } |
| 1974 | |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1975 | static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) |
| 1976 | { |
| 1977 | u32 chunk_size; |
| 1978 | |
| 1979 | /* start big and work our way down */ |
Akinobu Mita | 30f92d6 | 2017-09-06 12:15:31 +0200 | [diff] [blame] | 1980 | for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); |
Christoph Hellwig | 044a9df | 2017-09-11 12:09:28 -0400 | [diff] [blame] | 1981 | chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1982 | chunk_size /= 2) { |
| 1983 | if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { |
| 1984 | if (!min || dev->host_mem_size >= min) |
| 1985 | return 0; |
| 1986 | nvme_free_host_mem(dev); |
| 1987 | } |
| 1988 | } |
| 1989 | |
| 1990 | return -ENOMEM; |
| 1991 | } |
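
Allocation starts from the preferred size using the largest chunk the page allocator can hand out in one go (PAGE_SIZE * MAX_ORDER_NR_PAGES, typically 4 MiB with 4 KiB pages) and halves the chunk size until either the controller's minimum is satisfied or the floor of max(hmminds * 4096, 2 pages) is reached. A rough sketch of that back-off schedule, with assumed sizes rather than real Identify data:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t preferred = 256ull << 20;	/* HMPRE * 4096 (example) */
	uint64_t start = 4ull << 20;		/* assumed PAGE_SIZE * MAX_ORDER_NR_PAGES */
	uint64_t min_chunk = 2 * 4096;		/* max(hmminds * 4096, 2 pages) */
	uint64_t chunk;

	for (chunk = start < preferred ? start : preferred;
	     chunk >= min_chunk; chunk /= 2)
		printf("chunks of %8llu KiB -> up to %llu descriptors\n",
		       (unsigned long long)(chunk >> 10),
		       (unsigned long long)((preferred + chunk - 1) / chunk));
	return 0;
}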
| 1992 | |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 1993 | static int nvme_setup_host_mem(struct nvme_dev *dev) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1994 | { |
| 1995 | u64 max = (u64)max_host_mem_size_mb * SZ_1M; |
| 1996 | u64 preferred = (u64)dev->ctrl.hmpre * 4096; |
| 1997 | u64 min = (u64)dev->ctrl.hmmin * 4096; |
| 1998 | u32 enable_bits = NVME_HOST_MEM_ENABLE; |
Minwoo Im | 6fbcde6 | 2017-12-05 05:23:54 +0900 | [diff] [blame] | 1999 | int ret; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2000 | |
| 2001 | preferred = min(preferred, max); |
| 2002 | if (min > max) { |
| 2003 | dev_warn(dev->ctrl.device, |
| 2004 | "min host memory (%lld MiB) above limit (%d MiB).\n", |
| 2005 | min >> ilog2(SZ_1M), max_host_mem_size_mb); |
| 2006 | nvme_free_host_mem(dev); |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2007 | return 0; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2008 | } |
| 2009 | |
| 2010 | /* |
| 2011 | * If we already have a buffer allocated, check if we can reuse it. |
| 2012 | */ |
| 2013 | if (dev->host_mem_descs) { |
| 2014 | if (dev->host_mem_size >= min) |
| 2015 | enable_bits |= NVME_HOST_MEM_RETURN; |
| 2016 | else |
| 2017 | nvme_free_host_mem(dev); |
| 2018 | } |
| 2019 | |
| 2020 | if (!dev->host_mem_descs) { |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 2021 | if (nvme_alloc_host_mem(dev, min, preferred)) { |
| 2022 | dev_warn(dev->ctrl.device, |
| 2023 | "failed to allocate host memory buffer.\n"); |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2024 | return 0; /* controller must work without HMB */ |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 2025 | } |
| 2026 | |
| 2027 | dev_info(dev->ctrl.device, |
| 2028 | "allocated %lld MiB host memory buffer.\n", |
| 2029 | dev->host_mem_size >> ilog2(SZ_1M)); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2030 | } |
| 2031 | |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2032 | ret = nvme_set_host_mem(dev, enable_bits); |
| 2033 | if (ret) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2034 | nvme_free_host_mem(dev); |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2035 | return ret; |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 2036 | } |
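
HMPRE and HMMIN from Identify Controller are expressed in 4 KiB units, and the preferred size is clamped to the max_host_mem_size_mb module parameter; if even the controller's minimum exceeds that cap, the HMB is skipped entirely. A worked sketch of the clamping with assumed values (128 MiB stands in for the module parameter, and the Identify fields are examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hmpre = 65536, hmmin = 1024;	/* example Identify values, 4 KiB units */
	uint64_t max = 128ull << 20;		/* assumed max_host_mem_size_mb cap */
	uint64_t preferred = hmpre * 4096;
	uint64_t min = hmmin * 4096;

	if (preferred > max)
		preferred = max;
	if (min > max)
		printf("controller minimum exceeds the cap, skip the HMB\n");
	else
		printf("ask for %llu MiB (controller minimum %llu MiB)\n",
		       (unsigned long long)(preferred >> 20),
		       (unsigned long long)(min >> 20));
	return 0;
}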
| 2037 | |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2038 | /* irq_queues covers the admin queue */ |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2039 | static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2040 | { |
| 2041 | unsigned int this_w_queues = write_queues; |
| 2042 | |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2043 | WARN_ON(!irq_queues); |
| 2044 | |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2045 | /* |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2046 | * Set up the read/write queue split, and assign the admin queue one |
| 2047 | * independent irq vector if irq_queues is > 1. |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2048 | */ |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2049 | if (irq_queues <= 2) { |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 2050 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; |
| 2051 | dev->io_queues[HCTX_TYPE_READ] = 0; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2052 | return; |
| 2053 | } |
| 2054 | |
| 2055 | /* |
| 2056 | * If 'write_queues' is set, ensure it leaves room for at least |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2057 | * one read queue and one admin queue |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2058 | */ |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2059 | if (this_w_queues >= irq_queues) |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2060 | this_w_queues = irq_queues - 2; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2061 | |
| 2062 | /* |
| 2063 | * If 'write_queues' is set to zero, reads and writes will share |
| 2064 | * a queue set. |
| 2065 | */ |
| 2066 | if (!this_w_queues) { |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2067 | dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 2068 | dev->io_queues[HCTX_TYPE_READ] = 0; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2069 | } else { |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 2070 | dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2071 | dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2072 | } |
| 2073 | } |
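
The split above reserves one interrupt vector for the admin queue and divides the rest between a default (write) set and a read set according to the write_queues module parameter. A standalone sketch of the same policy:

#include <stdio.h>

/* Mirrors nvme_calc_io_queues(); write_queues plays the module parameter. */
static void calc_io_queues(unsigned int irq_queues, unsigned int write_queues,
			   unsigned int *nr_default, unsigned int *nr_read)
{
	if (irq_queues <= 2) {
		*nr_default = 1;
		*nr_read = 0;
		return;
	}
	if (write_queues >= irq_queues)
		write_queues = irq_queues - 2;
	if (!write_queues) {
		*nr_default = irq_queues - 1;
		*nr_read = 0;
	} else {
		*nr_default = write_queues;
		*nr_read = irq_queues - write_queues - 1;
	}
}

int main(void)
{
	unsigned int nr_default, nr_read;

	calc_io_queues(9, 2, &nr_default, &nr_read);	/* 8 I/O vectors + admin */
	printf("default=%u read=%u (plus the admin vector)\n",
	       nr_default, nr_read);
	return 0;
}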
| 2074 | |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2075 | static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2076 | { |
| 2077 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 2078 | int irq_sets[2]; |
| 2079 | struct irq_affinity affd = { |
| 2080 | .pre_vectors = 1, |
| 2081 | .nr_sets = ARRAY_SIZE(irq_sets), |
| 2082 | .sets = irq_sets, |
| 2083 | }; |
Jens Axboe | 30e0662 | 2018-11-14 10:13:50 -0700 | [diff] [blame] | 2084 | int result = 0; |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2085 | unsigned int irq_queues, this_p_queues; |
| 2086 | |
| 2087 | /* |
| 2088 | * Poll queues don't need interrupts, but we need at least one IO |
| 2089 | * queue left over for non-polled IO. |
| 2090 | */ |
| 2091 | this_p_queues = poll_queues; |
| 2092 | if (this_p_queues >= nr_io_queues) { |
| 2093 | this_p_queues = nr_io_queues - 1; |
| 2094 | irq_queues = 1; |
| 2095 | } else { |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2096 | irq_queues = nr_io_queues - this_p_queues + 1; |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2097 | } |
| 2098 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2099 | |
| 2100 | /* |
| 2101 | * For irq sets, we have to ask for minvec == maxvec. This passes |
| 2102 | * any reduction back to us, so we can adjust our queue counts and |
| 2103 | * IRQ vector needs. |
| 2104 | */ |
| 2105 | do { |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2106 | nvme_calc_io_queues(dev, irq_queues); |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 2107 | irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT]; |
| 2108 | irq_sets[1] = dev->io_queues[HCTX_TYPE_READ]; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2109 | if (!irq_sets[1]) |
| 2110 | affd.nr_sets = 1; |
| 2111 | |
| 2112 | /* |
Jens Axboe | db29eb0 | 2018-11-15 16:05:02 -0700 | [diff] [blame] | 2113 | * If we got a failure and we're down to asking for just |
| 2114 | * 1 + 1 queues, just ask for a single vector. We'll share |
| 2115 | * that between the single IO queue and the admin queue. |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2116 | * Otherwise, we assign one independent vector to the admin queue. |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2117 | */ |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2118 | if (irq_queues > 1) |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2119 | irq_queues = irq_sets[0] + irq_sets[1] + 1; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2120 | |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2121 | result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, |
| 2122 | irq_queues, |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2123 | PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); |
| 2124 | |
| 2125 | /* |
Jens Axboe | db29eb0 | 2018-11-15 16:05:02 -0700 | [diff] [blame] | 2126 | * Need to reduce our vec counts. If we get ENOSPC, the |
| 2127 | * platform should support multiple vecs; we just need |
| 2128 | * to decrease our ask. If we get EINVAL, the platform |
| 2129 | * likely does not. Back down to ask for just one vector. |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2130 | */ |
| 2131 | if (result == -ENOSPC) { |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2132 | irq_queues--; |
| 2133 | if (!irq_queues) |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2134 | return result; |
| 2135 | continue; |
Jens Axboe | db29eb0 | 2018-11-15 16:05:02 -0700 | [diff] [blame] | 2136 | } else if (result == -EINVAL) { |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2137 | irq_queues = 1; |
Jens Axboe | db29eb0 | 2018-11-15 16:05:02 -0700 | [diff] [blame] | 2138 | continue; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2139 | } else if (result <= 0) |
| 2140 | return -EIO; |
| 2141 | break; |
| 2142 | } while (1); |
| 2143 | |
| 2144 | return result; |
| 2145 | } |
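
Because minvec == maxvec is requested, any shortfall comes back as -ENOSPC and the driver shrinks its ask one vector at a time, while -EINVAL drops it straight to a single shared vector. A much-simplified sketch of that retry loop, with a fake allocator standing in for pci_alloc_irq_vectors_affinity():

#include <stdio.h>

#define FAKE_ENOSPC 28

/* Pretend the platform can hand out at most four vectors. */
static int fake_alloc_vectors(unsigned int asked)
{
	return asked <= 4 ? (int)asked : -FAKE_ENOSPC;
}

int main(void)
{
	unsigned int irq_queues = 8;
	int result;

	do {
		result = fake_alloc_vectors(irq_queues);
		if (result == -FAKE_ENOSPC) {	/* shrink the ask and retry */
			if (!--irq_queues)
				break;
			continue;
		}
		break;
	} while (1);

	printf("settled on %d vectors\n", result);
	return 0;
}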
| 2146 | |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2147 | static void nvme_disable_io_queues(struct nvme_dev *dev) |
| 2148 | { |
| 2149 | if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) |
| 2150 | __nvme_disable_io_queues(dev, nvme_admin_delete_cq); |
| 2151 | } |
| 2152 | |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 2153 | static int nvme_setup_io_queues(struct nvme_dev *dev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2154 | { |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 2155 | struct nvme_queue *adminq = &dev->queues[0]; |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2156 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 2157 | int result, nr_io_queues; |
| 2158 | unsigned long size; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2159 | |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2160 | nr_io_queues = max_io_queues(); |
Christoph Hellwig | 9a0be7a | 2015-11-26 11:09:06 +0100 | [diff] [blame] | 2161 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); |
| 2162 | if (result < 0) |
Matthew Wilcox | 1b23484 | 2011-01-20 13:01:49 -0500 | [diff] [blame] | 2163 | return result; |
Christoph Hellwig | 9a0be7a | 2015-11-26 11:09:06 +0100 | [diff] [blame] | 2164 | |
Christoph Hellwig | f5fa90d | 2016-06-06 23:20:50 +0200 | [diff] [blame] | 2165 | if (nr_io_queues == 0) |
Keith Busch | a522905 | 2016-04-08 16:09:10 -0600 | [diff] [blame] | 2166 | return 0; |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 2167 | |
| 2168 | clear_bit(NVMEQ_ENABLED, &adminq->flags); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2169 | |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 2170 | if (dev->cmb_use_sqes) { |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 2171 | result = nvme_cmb_qdepth(dev, nr_io_queues, |
| 2172 | sizeof(struct nvme_command)); |
| 2173 | if (result > 0) |
| 2174 | dev->q_depth = result; |
| 2175 | else |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 2176 | dev->cmb_use_sqes = false; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 2177 | } |
| 2178 | |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 2179 | do { |
| 2180 | size = db_bar_size(dev, nr_io_queues); |
| 2181 | result = nvme_remap_bar(dev, size); |
| 2182 | if (!result) |
| 2183 | break; |
| 2184 | if (!--nr_io_queues) |
| 2185 | return -ENOMEM; |
| 2186 | } while (1); |
| 2187 | adminq->q_db = dev->dbs; |
Matthew Wilcox | f1938f6 | 2011-10-20 17:00:41 -0400 | [diff] [blame] | 2188 | |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2189 | retry: |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 2190 | /* Deregister the admin queue's interrupt */ |
Christoph Hellwig | 0ff199c | 2017-04-13 09:06:43 +0200 | [diff] [blame] | 2191 | pci_free_irq(pdev, 0, adminq); |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 2192 | |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2193 | /* |
| 2194 | * If we enabled MSI-X early because INTx isn't usable, disable it |
| 2195 | * again before setting up the full range we need. |
| 2196 | */ |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2197 | pci_free_irq_vectors(pdev); |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2198 | |
| 2199 | result = nvme_setup_irqs(dev, nr_io_queues); |
Keith Busch | 22b5560 | 2018-04-12 09:16:10 -0600 | [diff] [blame] | 2200 | if (result <= 0) |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2201 | return -EIO; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2202 | |
Keith Busch | 22b5560 | 2018-04-12 09:16:10 -0600 | [diff] [blame] | 2203 | dev->num_vecs = result; |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 2204 | result = max(result - 1, 1); |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 2205 | dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; |
Matthew Wilcox | 1b23484 | 2011-01-20 13:01:49 -0500 | [diff] [blame] | 2206 | |
Matthew Wilcox | 063a809 | 2013-06-20 10:53:48 -0400 | [diff] [blame] | 2207 | /* |
| 2208 | * Should investigate if there's a performance win from allocating |
| 2209 | * more queues than interrupt vectors; it might allow the submission |
| 2210 | * path to scale better, even if the receive path is limited by the |
| 2211 | * number of interrupts. |
| 2212 | */ |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2213 | result = queue_request_irq(adminq); |
Jon Derrick | 758dd7f | 2015-06-30 11:22:52 -0600 | [diff] [blame] | 2214 | if (result) { |
| 2215 | adminq->cq_vector = -1; |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 2216 | return result; |
Jon Derrick | 758dd7f | 2015-06-30 11:22:52 -0600 | [diff] [blame] | 2217 | } |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 2218 | set_bit(NVMEQ_ENABLED, &adminq->flags); |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2219 | |
| 2220 | result = nvme_create_io_queues(dev); |
| 2221 | if (result || dev->online_queues < 2) |
| 2222 | return result; |
| 2223 | |
| 2224 | if (dev->online_queues - 1 < dev->max_qid) { |
| 2225 | nr_io_queues = dev->online_queues - 1; |
| 2226 | nvme_disable_io_queues(dev); |
| 2227 | nvme_suspend_io_queues(dev); |
| 2228 | goto retry; |
| 2229 | } |
| 2230 | dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", |
| 2231 | dev->io_queues[HCTX_TYPE_DEFAULT], |
| 2232 | dev->io_queues[HCTX_TYPE_READ], |
| 2233 | dev->io_queues[HCTX_TYPE_POLL]); |
| 2234 | return 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2235 | } |
| 2236 | |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 2237 | static void nvme_del_queue_end(struct request *req, blk_status_t error) |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2238 | { |
| 2239 | struct nvme_queue *nvmeq = req->end_io_data; |
| 2240 | |
| 2241 | blk_mq_free_request(req); |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2242 | complete(&nvmeq->delete_done); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2243 | } |
| 2244 | |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 2245 | static void nvme_del_cq_end(struct request *req, blk_status_t error) |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2246 | { |
| 2247 | struct nvme_queue *nvmeq = req->end_io_data; |
| 2248 | |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2249 | if (error) |
| 2250 | set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2251 | |
| 2252 | nvme_del_queue_end(req, error); |
| 2253 | } |
| 2254 | |
| 2255 | static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) |
| 2256 | { |
| 2257 | struct request_queue *q = nvmeq->dev->ctrl.admin_q; |
| 2258 | struct request *req; |
| 2259 | struct nvme_command cmd; |
| 2260 | |
| 2261 | memset(&cmd, 0, sizeof(cmd)); |
| 2262 | cmd.delete_queue.opcode = opcode; |
| 2263 | cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); |
| 2264 | |
Christoph Hellwig | eb71f43 | 2016-06-13 16:45:23 +0200 | [diff] [blame] | 2265 | req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2266 | if (IS_ERR(req)) |
| 2267 | return PTR_ERR(req); |
| 2268 | |
| 2269 | req->timeout = ADMIN_TIMEOUT; |
| 2270 | req->end_io_data = nvmeq; |
| 2271 | |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2272 | init_completion(&nvmeq->delete_done); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2273 | blk_execute_rq_nowait(q, NULL, req, false, |
| 2274 | opcode == nvme_admin_delete_cq ? |
| 2275 | nvme_del_cq_end : nvme_del_queue_end); |
| 2276 | return 0; |
| 2277 | } |
| 2278 | |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2279 | static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2280 | { |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2281 | int nr_queues = dev->online_queues - 1, sent = 0; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2282 | unsigned long timeout; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2283 | |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2284 | retry: |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2285 | timeout = ADMIN_TIMEOUT; |
| 2286 | while (nr_queues > 0) { |
| 2287 | if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) |
| 2288 | break; |
| 2289 | nr_queues--; |
| 2290 | sent++; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2291 | } |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2292 | while (sent) { |
| 2293 | struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; |
| 2294 | |
| 2295 | timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2296 | timeout); |
| 2297 | if (timeout == 0) |
| 2298 | return false; |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2299 | |
| 2300 | /* handle any remaining CQEs */ |
| 2301 | if (opcode == nvme_admin_delete_cq && |
| 2302 | !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags)) |
| 2303 | nvme_poll_irqdisable(nvmeq, -1); |
| 2304 | |
| 2305 | sent--; |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2306 | if (nr_queues) |
| 2307 | goto retry; |
| 2308 | } |
| 2309 | return true; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2310 | } |
| 2311 | |
Matthew Wilcox | 422ef0c | 2013-04-16 11:22:36 -0400 | [diff] [blame] | 2312 | /* |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2313 | * Return an error value only when tagset allocation fails. |
Matthew Wilcox | 422ef0c | 2013-04-16 11:22:36 -0400 | [diff] [blame] | 2314 | */ |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 2315 | static int nvme_dev_add(struct nvme_dev *dev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2316 | { |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2317 | int ret; |
| 2318 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2319 | if (!dev->ctrl.tagset) { |
Christoph Hellwig | 376f7ef | 2018-12-02 17:46:27 +0100 | [diff] [blame] | 2320 | dev->tagset.ops = &nvme_mq_ops; |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2321 | dev->tagset.nr_hw_queues = dev->online_queues - 1; |
Christoph Hellwig | ed92ad3 | 2018-12-14 14:06:59 +0100 | [diff] [blame] | 2322 | dev->tagset.nr_maps = 2; /* default + read */ |
| 2323 | if (dev->io_queues[HCTX_TYPE_POLL]) |
| 2324 | dev->tagset.nr_maps++; |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2325 | dev->tagset.timeout = NVME_IO_TIMEOUT; |
| 2326 | dev->tagset.numa_node = dev_to_node(dev->dev); |
| 2327 | dev->tagset.queue_depth = |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2328 | min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; |
Chaitanya Kulkarni | a7a7cbe | 2017-10-16 18:24:20 -0700 | [diff] [blame] | 2329 | dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false); |
| 2330 | if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) { |
| 2331 | dev->tagset.cmd_size = max(dev->tagset.cmd_size, |
| 2332 | nvme_pci_cmd_size(dev, true)); |
| 2333 | } |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2334 | dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; |
| 2335 | dev->tagset.driver_data = dev; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2336 | |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2337 | ret = blk_mq_alloc_tag_set(&dev->tagset); |
| 2338 | if (ret) { |
| 2339 | dev_warn(dev->ctrl.device, |
| 2340 | "IO queues tagset allocation failed %d\n", ret); |
| 2341 | return ret; |
| 2342 | } |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2343 | dev->ctrl.tagset = &dev->tagset; |
Helen Koike | f9f38e3 | 2017-04-10 12:51:07 -0300 | [diff] [blame] | 2344 | |
| 2345 | nvme_dbbuf_set(dev); |
Keith Busch | 949928c | 2015-12-17 17:08:15 -0700 | [diff] [blame] | 2346 | } else { |
| 2347 | blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); |
| 2348 | |
| 2349 | /* Free previously allocated queues that are no longer usable */ |
| 2350 | nvme_free_queues(dev, dev->online_queues); |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2351 | } |
Keith Busch | 949928c | 2015-12-17 17:08:15 -0700 | [diff] [blame] | 2352 | |
Keith Busch | e1e5e56 | 2015-02-19 13:39:03 -0700 | [diff] [blame] | 2353 | return 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2354 | } |
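
The tag-set depth above is one less than the hardware queue depth because an NVMe queue with N slots can hold at most N - 1 outstanding commands (a completely full ring would be indistinguishable from an empty one). A small sketch of the calculation, assuming BLK_MQ_MAX_DEPTH is 10240 as in current kernels:

#include <stdio.h>

int main(void)
{
	unsigned int q_depth = 1024;		/* CAP.MQES + 1, for example */
	unsigned int blk_mq_max_depth = 10240;	/* assumed BLK_MQ_MAX_DEPTH */
	unsigned int tagset_depth =
		(q_depth < blk_mq_max_depth ? q_depth : blk_mq_max_depth) - 1;

	printf("tagset queue_depth = %u\n", tagset_depth);
	return 0;
}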
| 2355 | |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2356 | static int nvme_pci_enable(struct nvme_dev *dev) |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2357 | { |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2358 | int result = -ENOMEM; |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2359 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2360 | |
| 2361 | if (pci_enable_device_mem(pdev)) |
| 2362 | return result; |
| 2363 | |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2364 | pci_set_master(pdev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2365 | |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2366 | if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && |
| 2367 | dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) |
Russell King | 052d0ef | 2013-06-26 23:49:11 +0100 | [diff] [blame] | 2368 | goto disable; |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2369 | |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 2370 | if (readl(dev->bar + NVME_REG_CSTS) == -1) { |
Keith Busch | 0e53d18 | 2013-12-10 13:10:39 -0700 | [diff] [blame] | 2371 | result = -ENODEV; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2372 | goto disable; |
Keith Busch | 0e53d18 | 2013-12-10 13:10:39 -0700 | [diff] [blame] | 2373 | } |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2374 | |
| 2375 | /* |
Keith Busch | a522905 | 2016-04-08 16:09:10 -0600 | [diff] [blame] | 2376 | * Some devices and/or platforms don't advertise or work with INTx |
| 2377 | * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll |
| 2378 | * adjust this later. |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2379 | */ |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2380 | result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); |
| 2381 | if (result < 0) |
| 2382 | return result; |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2383 | |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2384 | dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 2385 | |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2386 | dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, |
weiping zhang | b27c1e6 | 2017-07-10 16:46:59 +0800 | [diff] [blame] | 2387 | io_queue_depth); |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2388 | dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 2389 | dev->dbs = dev->bar + 4096; |
Stephan Günther | 1f390c1 | 2015-12-01 13:23:22 -0700 | [diff] [blame] | 2390 | |
| 2391 | /* |
| 2392 | * Temporary fix for the Apple controller found in the MacBook8,1 and |
| 2393 | * some MacBook7,1 to avoid controller resets and data loss. |
| 2394 | */ |
| 2395 | if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { |
| 2396 | dev->q_depth = 2; |
Christoph Hellwig | 9bdcfb1 | 2017-05-20 15:14:43 +0200 | [diff] [blame] | 2397 | dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " |
| 2398 | "set queue depth=%u to work around controller resets\n", |
Stephan Günther | 1f390c1 | 2015-12-01 13:23:22 -0700 | [diff] [blame] | 2399 | dev->q_depth); |
Martin K. Petersen | d554b5e | 2017-06-27 22:27:57 -0400 | [diff] [blame] | 2400 | } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && |
| 2401 | (pdev->device == 0xa821 || pdev->device == 0xa822) && |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2402 | NVME_CAP_MQES(dev->ctrl.cap) == 0) { |
Martin K. Petersen | d554b5e | 2017-06-27 22:27:57 -0400 | [diff] [blame] | 2403 | dev->q_depth = 64; |
| 2404 | dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " |
| 2405 | "set queue depth=%u\n", dev->q_depth); |
Stephan Günther | 1f390c1 | 2015-12-01 13:23:22 -0700 | [diff] [blame] | 2406 | } |
| 2407 | |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 2408 | nvme_map_cmb(dev); |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 2409 | |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2410 | pci_enable_pcie_error_reporting(pdev); |
| 2411 | pci_save_state(pdev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2412 | return 0; |
| 2413 | |
| 2414 | disable: |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2415 | pci_disable_device(pdev); |
| 2416 | return result; |
| 2417 | } |
| 2418 | |
| 2419 | static void nvme_dev_unmap(struct nvme_dev *dev) |
| 2420 | { |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2421 | if (dev->bar) |
| 2422 | iounmap(dev->bar); |
Johannes Thumshirn | a1f447b | 2016-06-07 09:44:02 +0200 | [diff] [blame] | 2423 | pci_release_mem_regions(to_pci_dev(dev->dev)); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2424 | } |
| 2425 | |
| 2426 | static void nvme_pci_disable(struct nvme_dev *dev) |
| 2427 | { |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2428 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 2429 | |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2430 | pci_free_irq_vectors(pdev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2431 | |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2432 | if (pci_is_enabled(pdev)) { |
| 2433 | pci_disable_pcie_error_reporting(pdev); |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2434 | pci_disable_device(pdev); |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 2435 | } |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 2436 | } |
| 2437 | |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2438 | static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2439 | { |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2440 | bool dead = true; |
| 2441 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 2442 | |
Keith Busch | 77bf25e | 2015-11-26 12:21:29 +0100 | [diff] [blame] | 2443 | mutex_lock(&dev->shutdown_lock); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2444 | if (pci_is_enabled(pdev)) { |
| 2445 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
| 2446 | |
Keith Busch | ebef736 | 2017-06-27 17:44:05 -0600 | [diff] [blame] | 2447 | if (dev->ctrl.state == NVME_CTRL_LIVE || |
| 2448 | dev->ctrl.state == NVME_CTRL_RESETTING) |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2449 | nvme_start_freeze(&dev->ctrl); |
| 2450 | dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || |
| 2451 | pdev->error_state != pci_channel_io_normal); |
Keith Busch | c9d3bf8 | 2015-01-07 18:55:52 -0700 | [diff] [blame] | 2452 | } |
Gabriel Krisman Bertazi | c21377f | 2016-08-11 09:35:57 -0600 | [diff] [blame] | 2453 | |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2454 | /* |
| 2455 | * Give the controller a chance to complete all entered requests if |
| 2456 | * doing a safe shutdown. |
| 2457 | */ |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2458 | if (!dead) { |
| 2459 | if (shutdown) |
| 2460 | nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); |
Jianchao Wang | 9a915a5 | 2018-02-12 20:57:24 +0800 | [diff] [blame] | 2461 | } |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2462 | |
Jianchao Wang | 9a915a5 | 2018-02-12 20:57:24 +0800 | [diff] [blame] | 2463 | nvme_stop_queues(&dev->ctrl); |
| 2464 | |
Keith Busch | 64ee0ac | 2018-04-12 09:16:08 -0600 | [diff] [blame] | 2465 | if (!dead && dev->ctrl.queue_count > 0) { |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2466 | nvme_disable_io_queues(dev); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2467 | nvme_disable_admin_queue(dev, shutdown); |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 2468 | } |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2469 | nvme_suspend_io_queues(dev); |
| 2470 | nvme_suspend_queue(&dev->queues[0]); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2471 | nvme_pci_disable(dev); |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 2472 | |
Ming Lin | e1958e6 | 2016-05-18 14:05:01 -0700 | [diff] [blame] | 2473 | blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); |
| 2474 | blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2475 | |
| 2476 | /* |
| 2477 | * The driver will not be starting up queues again if shutting down, so it |
| 2478 | * must flush all entered requests to their failed completion to avoid |
| 2479 | * deadlocking the blk-mq hot-cpu notifier. |
| 2480 | */ |
| 2481 | if (shutdown) |
| 2482 | nvme_start_queues(&dev->ctrl); |
Keith Busch | 77bf25e | 2015-11-26 12:21:29 +0100 | [diff] [blame] | 2483 | mutex_unlock(&dev->shutdown_lock); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2484 | } |
| 2485 | |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2486 | static int nvme_setup_prp_pools(struct nvme_dev *dev) |
| 2487 | { |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2488 | dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2489 | PAGE_SIZE, PAGE_SIZE, 0); |
| 2490 | if (!dev->prp_page_pool) |
| 2491 | return -ENOMEM; |
| 2492 | |
Matthew Wilcox | 99802a7 | 2011-02-10 10:30:34 -0500 | [diff] [blame] | 2493 | /* Optimisation for I/Os between 4k and 128k */ |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2494 | dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, |
Matthew Wilcox | 99802a7 | 2011-02-10 10:30:34 -0500 | [diff] [blame] | 2495 | 256, 256, 0); |
| 2496 | if (!dev->prp_small_pool) { |
| 2497 | dma_pool_destroy(dev->prp_page_pool); |
| 2498 | return -ENOMEM; |
| 2499 | } |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2500 | return 0; |
| 2501 | } |
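
Two DMA pools keep PRP-list allocations cheap: a page-sized pool for large transfers and a 256-byte pool for commands whose list fits in 32 entries, which is what the "I/Os between 4k and 128k" comment refers to with 4 KiB pages. A short sketch of that boundary:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;			/* assumed controller page size */
	unsigned int small_pool_bytes = 256;
	unsigned int entries = small_pool_bytes / 8;	/* 8-byte PRP entries */

	printf("the small pool covers up to %u KiB per command\n",
	       entries * page_size / 1024);
	return 0;
}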
| 2502 | |
| 2503 | static void nvme_release_prp_pools(struct nvme_dev *dev) |
| 2504 | { |
| 2505 | dma_pool_destroy(dev->prp_page_pool); |
Matthew Wilcox | 99802a7 | 2011-02-10 10:30:34 -0500 | [diff] [blame] | 2506 | dma_pool_destroy(dev->prp_small_pool); |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2507 | } |
| 2508 | |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2509 | static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2510 | { |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2511 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
Keith Busch | 9ac2709 | 2014-01-31 16:53:39 -0700 | [diff] [blame] | 2512 | |
Helen Koike | f9f38e3 | 2017-04-10 12:51:07 -0300 | [diff] [blame] | 2513 | nvme_dbbuf_dma_free(dev); |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2514 | put_device(dev->dev); |
Keith Busch | 4af0e21 | 2015-06-08 10:08:13 -0600 | [diff] [blame] | 2515 | if (dev->tagset.tags) |
| 2516 | blk_mq_free_tag_set(&dev->tagset); |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2517 | if (dev->ctrl.admin_q) |
| 2518 | blk_put_queue(dev->ctrl.admin_q); |
Keith Busch | 5e82e95 | 2013-02-19 10:17:58 -0700 | [diff] [blame] | 2519 | kfree(dev->queues); |
Scott Bauer | e286bcf | 2017-02-22 10:15:07 -0700 | [diff] [blame] | 2520 | free_opal_dev(dev->ctrl.opal_dev); |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2521 | mempool_destroy(dev->iod_mempool); |
Keith Busch | 5e82e95 | 2013-02-19 10:17:58 -0700 | [diff] [blame] | 2522 | kfree(dev); |
| 2523 | } |
| 2524 | |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2525 | static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) |
| 2526 | { |
Linus Torvalds | 237045f | 2016-03-18 17:13:31 -0700 | [diff] [blame] | 2527 | dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status); |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2528 | |
Christoph Hellwig | d22524a | 2017-10-18 13:25:42 +0200 | [diff] [blame] | 2529 | nvme_get_ctrl(&dev->ctrl); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2530 | nvme_dev_disable(dev, false); |
Jianchao Wang | 9f9cafc | 2018-06-20 13:42:22 +0800 | [diff] [blame] | 2531 | nvme_kill_queues(&dev->ctrl); |
Ming Lei | 03e0f3a | 2017-11-09 19:32:07 +0800 | [diff] [blame] | 2532 | if (!queue_work(nvme_wq, &dev->remove_work)) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2533 | nvme_put_ctrl(&dev->ctrl); |
| 2534 | } |
| 2535 | |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 2536 | static void nvme_reset_work(struct work_struct *work) |
Keith Busch | 5e82e95 | 2013-02-19 10:17:58 -0700 | [diff] [blame] | 2537 | { |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2538 | struct nvme_dev *dev = |
| 2539 | container_of(work, struct nvme_dev, ctrl.reset_work); |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 2540 | bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2541 | int result = -ENODEV; |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2542 | enum nvme_ctrl_state new_state = NVME_CTRL_LIVE; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2543 | |
Rakesh Pandit | 82b057c | 2017-06-05 14:43:11 +0300 | [diff] [blame] | 2544 | if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 2545 | goto out; |
| 2546 | |
| 2547 | /* |
| 2548 | * If we're called to reset a live controller first shut it down before |
| 2549 | * moving on. |
| 2550 | */ |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2551 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2552 | nvme_dev_disable(dev, false); |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 2553 | |
Keith Busch | 5c959d7 | 2019-01-23 18:46:11 -0700 | [diff] [blame] | 2554 | mutex_lock(&dev->shutdown_lock); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2555 | result = nvme_pci_enable(dev); |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2556 | if (result) |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2557 | goto out; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2558 | |
Sagi Grimberg | 01ad099 | 2017-05-01 00:27:17 +0300 | [diff] [blame] | 2559 | result = nvme_pci_configure_admin_queue(dev); |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2560 | if (result) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2561 | goto out; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2562 | |
Keith Busch | 0fb59cb | 2015-01-07 18:55:50 -0700 | [diff] [blame] | 2563 | result = nvme_alloc_admin_tags(dev); |
| 2564 | if (result) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2565 | goto out; |
Dan McLeran | b9afca3 | 2014-04-07 17:10:11 -0600 | [diff] [blame] | 2566 | |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2567 | /* |
| 2568 | * Limit the max command size to prevent iod->sg allocations going |
| 2569 | * over a single page. |
| 2570 | */ |
| 2571 | dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; |
| 2572 | dev->ctrl.max_segments = NVME_MAX_SEGS; |
Keith Busch | 5c959d7 | 2019-01-23 18:46:11 -0700 | [diff] [blame] | 2573 | mutex_unlock(&dev->shutdown_lock); |
| 2574 | |
| 2575 | /* |
 | 2576 | 	 * Use the CONNECTING state (introduced by the nvme-fc/rdma transports)
 | 2577 | 	 * to mark the initialization procedure here.
| 2578 | */ |
| 2579 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { |
| 2580 | dev_warn(dev->ctrl.device, |
| 2581 | "failed to mark controller CONNECTING\n"); |
| 2582 | goto out; |
| 2583 | } |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2584 | |
Christoph Hellwig | ce4541f | 2015-10-16 07:58:46 +0200 | [diff] [blame] | 2585 | result = nvme_init_identify(&dev->ctrl); |
| 2586 | if (result) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2587 | goto out; |
Christoph Hellwig | ce4541f | 2015-10-16 07:58:46 +0200 | [diff] [blame] | 2588 | |
Scott Bauer | e286bcf | 2017-02-22 10:15:07 -0700 | [diff] [blame] | 2589 | if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { |
| 2590 | if (!dev->ctrl.opal_dev) |
| 2591 | dev->ctrl.opal_dev = |
| 2592 | init_opal_dev(&dev->ctrl, &nvme_sec_submit); |
| 2593 | else if (was_suspend) |
| 2594 | opal_unlock_from_suspend(dev->ctrl.opal_dev); |
| 2595 | } else { |
| 2596 | free_opal_dev(dev->ctrl.opal_dev); |
| 2597 | dev->ctrl.opal_dev = NULL; |
Christoph Hellwig | 4f1244c | 2017-02-17 13:59:39 +0100 | [diff] [blame] | 2598 | } |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 2599 | |
Helen Koike | f9f38e3 | 2017-04-10 12:51:07 -0300 | [diff] [blame] | 2600 | if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { |
| 2601 | result = nvme_dbbuf_dma_alloc(dev); |
| 2602 | if (result) |
| 2603 | dev_warn(dev->dev, |
| 2604 | "unable to allocate dma for dbbuf\n"); |
| 2605 | } |
| 2606 | |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2607 | if (dev->ctrl.hmpre) { |
| 2608 | result = nvme_setup_host_mem(dev); |
| 2609 | if (result < 0) |
| 2610 | goto out; |
| 2611 | } |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2612 | |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2613 | result = nvme_setup_io_queues(dev); |
Keith Busch | badc34d | 2014-06-23 14:25:35 -0600 | [diff] [blame] | 2614 | if (result) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2615 | goto out; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2616 | |
Keith Busch | 21f033f | 2016-04-12 11:13:11 -0600 | [diff] [blame] | 2617 | /* |
Christoph Hellwig | 2659e57 | 2015-10-02 18:51:31 +0200 | [diff] [blame] | 2618 | * Keep the controller around but remove all namespaces if we don't have |
| 2619 | * any working I/O queue. |
| 2620 | */ |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2621 | if (dev->online_queues < 2) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 2622 | dev_warn(dev->ctrl.device, "IO queues not created\n"); |
Keith Busch | 3b24774 | 2016-04-27 15:51:18 -0600 | [diff] [blame] | 2623 | nvme_kill_queues(&dev->ctrl); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2624 | nvme_remove_namespaces(&dev->ctrl); |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2625 | new_state = NVME_CTRL_ADMIN_ONLY; |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2626 | } else { |
Keith Busch | 2564626 | 2016-01-04 09:10:57 -0700 | [diff] [blame] | 2627 | nvme_start_queues(&dev->ctrl); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2628 | nvme_wait_freeze(&dev->ctrl); |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2629 | 		/* hit this only when tagset allocation fails */
| 2630 | if (nvme_dev_add(dev)) |
| 2631 | new_state = NVME_CTRL_ADMIN_ONLY; |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2632 | nvme_unfreeze(&dev->ctrl); |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2633 | } |
| 2634 | |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2635 | /* |
 | 2636 | 	 * If only the admin queue is live, keep it to allow further investigation
 | 2637 | 	 * or recovery.
| 2638 | */ |
| 2639 | if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) { |
| 2640 | dev_warn(dev->ctrl.device, |
| 2641 | "failed to mark controller state %d\n", new_state); |
Christoph Hellwig | bb8d261 | 2016-04-26 13:51:57 +0200 | [diff] [blame] | 2642 | goto out; |
| 2643 | } |
Christoph Hellwig | 92911a5 | 2016-04-26 13:51:58 +0200 | [diff] [blame] | 2644 | |
Sagi Grimberg | d09f2b4 | 2017-07-02 10:56:43 +0300 | [diff] [blame] | 2645 | nvme_start_ctrl(&dev->ctrl); |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2646 | return; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2647 | |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2648 | out: |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2649 | nvme_remove_dead_ctrl(dev, result); |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2650 | } |
| 2651 | |
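| | /*
| |  * Runs from the nvme workqueue: unbind the driver from the PCI device
| |  * (which cannot be done directly from the reset path) and drop the
| |  * reference taken in nvme_remove_dead_ctrl().
| |  */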
Christoph Hellwig | 5c8809e | 2015-11-26 12:35:49 +0100 | [diff] [blame] | 2652 | static void nvme_remove_dead_ctrl_work(struct work_struct *work) |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2653 | { |
Christoph Hellwig | 5c8809e | 2015-11-26 12:35:49 +0100 | [diff] [blame] | 2654 | struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2655 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2656 | |
| 2657 | if (pci_get_drvdata(pdev)) |
Keith Busch | 921920a | 2016-03-28 16:03:21 -0600 | [diff] [blame] | 2658 | device_release_driver(&pdev->dev); |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2659 | nvme_put_ctrl(&dev->ctrl); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2660 | } |
| 2661 | |
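| | /*
| |  * Register accessors used by the transport-independent NVMe core to read
| |  * and write controller registers through the memory-mapped PCI BAR.
| |  */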
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2662 | static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) |
Keith Busch | 4cc0652 | 2015-06-05 10:30:08 -0600 | [diff] [blame] | 2663 | { |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2664 | *val = readl(to_nvme_dev(ctrl)->bar + off); |
| 2665 | return 0; |
Keith Busch | 4cc0652 | 2015-06-05 10:30:08 -0600 | [diff] [blame] | 2666 | } |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2667 | |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 2668 | static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) |
| 2669 | { |
| 2670 | writel(val, to_nvme_dev(ctrl)->bar + off); |
| 2671 | return 0; |
| 2672 | } |
| 2673 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 2674 | static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) |
| 2675 | { |
| 2676 | *val = readq(to_nvme_dev(ctrl)->bar + off); |
| 2677 | return 0; |
| 2678 | } |
| 2679 | |
Keith Busch | 97c1222 | 2018-03-08 14:50:32 -0700 | [diff] [blame] | 2680 | static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) |
| 2681 | { |
| 2682 | struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); |
| 2683 | |
| 2684 | return snprintf(buf, size, "%s", dev_name(&pdev->dev)); |
| 2685 | } |
| 2686 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2687 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 2688 | .name = "pcie", |
Sagi Grimberg | e439bb1 | 2016-02-10 10:03:29 -0800 | [diff] [blame] | 2689 | .module = THIS_MODULE, |
Logan Gunthorpe | e0596ab | 2018-10-04 15:27:44 -0600 | [diff] [blame] | 2690 | .flags = NVME_F_METADATA_SUPPORTED | |
| 2691 | NVME_F_PCI_P2PDMA, |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2692 | .reg_read32 = nvme_pci_reg_read32, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 2693 | .reg_write32 = nvme_pci_reg_write32, |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 2694 | .reg_read64 = nvme_pci_reg_read64, |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2695 | .free_ctrl = nvme_pci_free_ctrl, |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2696 | .submit_async_event = nvme_pci_submit_async_event, |
Keith Busch | 97c1222 | 2018-03-08 14:50:32 -0700 | [diff] [blame] | 2697 | .get_address = nvme_pci_get_address, |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2698 | }; |
Keith Busch | 4cc0652 | 2015-06-05 10:30:08 -0600 | [diff] [blame] | 2699 | |
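| | /*
| |  * Reserve the PCI memory regions and map enough of BAR 0 to cover the
| |  * controller registers plus one page of doorbells; the mapping is extended
| |  * later once the final queue count is known.
| |  */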
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2700 | static int nvme_dev_map(struct nvme_dev *dev) |
| 2701 | { |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2702 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 2703 | |
Johannes Thumshirn | a1f447b | 2016-06-07 09:44:02 +0200 | [diff] [blame] | 2704 | if (pci_request_mem_regions(pdev, "nvme")) |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2705 | return -ENODEV; |
| 2706 | |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 2707 | if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2708 | goto release; |
| 2709 | |
Max Gurtovoy | 9fa196e | 2016-12-19 16:18:24 +0200 | [diff] [blame] | 2710 | return 0; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2711 | release: |
Max Gurtovoy | 9fa196e | 2016-12-19 16:18:24 +0200 | [diff] [blame] | 2712 | pci_release_mem_regions(pdev); |
| 2713 | return -ENODEV; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2714 | } |
| 2715 | |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2716 | static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2717 | { |
| 2718 | if (pdev->vendor == 0x144d && pdev->device == 0xa802) { |
| 2719 | /* |
| 2720 | * Several Samsung devices seem to drop off the PCIe bus |
 | 2721 | 		 * randomly when APST is on and the deepest sleep state is used.
| 2722 | * This has been observed on a Samsung "SM951 NVMe SAMSUNG |
| 2723 | * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD |
| 2724 | * 950 PRO 256GB", but it seems to be restricted to two Dell |
| 2725 | * laptops. |
| 2726 | */ |
| 2727 | if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && |
| 2728 | (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || |
| 2729 | dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) |
| 2730 | return NVME_QUIRK_NO_DEEPEST_PS; |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2731 | } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { |
| 2732 | /* |
| 2733 | * Samsung SSD 960 EVO drops off the PCIe bus after system |
Jarosław Janik | 467c77d4 | 2018-03-11 19:51:56 +0100 | [diff] [blame] | 2734 | * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as |
 | 2735 | 		 * within a few minutes after bootup on a Coffee Lake board -
| 2736 | * ASUS PRIME Z370-A |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2737 | */ |
| 2738 | if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && |
Jarosław Janik | 467c77d4 | 2018-03-11 19:51:56 +0100 | [diff] [blame] | 2739 | (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || |
| 2740 | dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2741 | return NVME_QUIRK_NO_APST; |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2742 | } |
| 2743 | |
| 2744 | return 0; |
| 2745 | } |
| 2746 | |
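| | /*
| |  * Initial controller bring-up runs asynchronously so that nvme_probe()
| |  * returns quickly; the extra reference taken in nvme_probe() is dropped
| |  * here once the initial reset and namespace scan have completed.
| |  */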
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2747 | static void nvme_async_probe(void *data, async_cookie_t cookie) |
| 2748 | { |
| 2749 | struct nvme_dev *dev = data; |
Keith Busch | 80f513b | 2018-05-07 08:30:24 -0600 | [diff] [blame] | 2750 | |
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2751 | nvme_reset_ctrl_sync(&dev->ctrl); |
| 2752 | flush_work(&dev->ctrl.scan_work); |
Keith Busch | 80f513b | 2018-05-07 08:30:24 -0600 | [diff] [blame] | 2753 | nvme_put_ctrl(&dev->ctrl); |
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2754 | } |
| 2755 | |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 2756 | static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2757 | { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2758 | int node, result = -ENOMEM; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2759 | struct nvme_dev *dev; |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2760 | unsigned long quirks = id->driver_data; |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2761 | size_t alloc_size; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2762 | |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2763 | node = dev_to_node(&pdev->dev); |
| 2764 | if (node == NUMA_NO_NODE) |
Masayoshi Mizuma | 2fa8435 | 2016-06-20 09:33:17 +0900 | [diff] [blame] | 2765 | set_dev_node(&pdev->dev, first_memory_node); |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2766 | |
| 2767 | dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2768 | if (!dev) |
| 2769 | return -ENOMEM; |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 2770 | |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2771 | dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue), |
| 2772 | GFP_KERNEL, node); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2773 | if (!dev->queues) |
| 2774 | goto free; |
| 2775 | |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2776 | dev->dev = get_device(&pdev->dev); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2777 | pci_set_drvdata(pdev, dev); |
Keith Busch | b3fffde | 2015-02-03 11:21:42 -0700 | [diff] [blame] | 2778 | |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2779 | result = nvme_dev_map(dev); |
| 2780 | if (result) |
Christophe JAILLET | b00c9b7 | 2017-07-16 10:39:03 +0200 | [diff] [blame] | 2781 | goto put_pci; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2782 | |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2783 | INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); |
Christoph Hellwig | 5c8809e | 2015-11-26 12:35:49 +0100 | [diff] [blame] | 2784 | INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); |
Keith Busch | 77bf25e | 2015-11-26 12:21:29 +0100 | [diff] [blame] | 2785 | mutex_init(&dev->shutdown_lock); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2786 | |
| 2787 | result = nvme_setup_prp_pools(dev); |
| 2788 | if (result) |
Christophe JAILLET | b00c9b7 | 2017-07-16 10:39:03 +0200 | [diff] [blame] | 2789 | goto unmap; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2790 | |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2791 | quirks |= check_vendor_combination_bug(pdev); |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2792 | |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2793 | /* |
| 2794 | * Double check that our mempool alloc size will cover the biggest |
| 2795 | * command we support. |
| 2796 | */ |
| 2797 | alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ, |
| 2798 | NVME_MAX_SEGS, true); |
| 2799 | WARN_ON_ONCE(alloc_size > PAGE_SIZE); |
| 2800 | |
| 2801 | dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, |
| 2802 | mempool_kfree, |
| 2803 | (void *) alloc_size, |
| 2804 | GFP_KERNEL, node); |
| 2805 | if (!dev->iod_mempool) { |
| 2806 | result = -ENOMEM; |
| 2807 | goto release_pools; |
| 2808 | } |
| 2809 | |
Keith Busch | b6e44b4 | 2018-07-11 16:44:44 -0600 | [diff] [blame] | 2810 | result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, |
| 2811 | quirks); |
| 2812 | if (result) |
| 2813 | goto release_mempool; |
| 2814 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 2815 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
| 2816 | |
Keith Busch | 80f513b | 2018-05-07 08:30:24 -0600 | [diff] [blame] | 2817 | nvme_get_ctrl(&dev->ctrl); |
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2818 | async_schedule(nvme_async_probe, dev); |
Sagi Grimberg | 4caff8f | 2017-12-31 14:01:19 +0200 | [diff] [blame] | 2819 | |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2820 | return 0; |
| 2821 | |
Keith Busch | b6e44b4 | 2018-07-11 16:44:44 -0600 | [diff] [blame] | 2822 | release_mempool: |
| 2823 | mempool_destroy(dev->iod_mempool); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2824 | release_pools: |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2825 | nvme_release_prp_pools(dev); |
Christophe JAILLET | b00c9b7 | 2017-07-16 10:39:03 +0200 | [diff] [blame] | 2826 | unmap: |
| 2827 | nvme_dev_unmap(dev); |
Keith Busch | a96d4f5 | 2014-08-19 19:15:59 -0600 | [diff] [blame] | 2828 | put_pci: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2829 | put_device(dev->dev); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2830 | free: |
| 2831 | kfree(dev->queues); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2832 | kfree(dev); |
| 2833 | return result; |
| 2834 | } |
| 2835 | |
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 2836 | static void nvme_reset_prepare(struct pci_dev *pdev) |
Keith Busch | f0d54a5 | 2014-05-02 10:40:43 -0600 | [diff] [blame] | 2837 | { |
Keith Busch | a673947 | 2014-06-23 16:03:21 -0600 | [diff] [blame] | 2838 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Linus Torvalds | f263fbb | 2017-07-08 15:51:57 -0700 | [diff] [blame] | 2839 | nvme_dev_disable(dev, false); |
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 2840 | } |
Keith Busch | f0d54a5 | 2014-05-02 10:40:43 -0600 | [diff] [blame] | 2841 | |
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 2842 | static void nvme_reset_done(struct pci_dev *pdev) |
| 2843 | { |
Linus Torvalds | f263fbb | 2017-07-08 15:51:57 -0700 | [diff] [blame] | 2844 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Sagi Grimberg | 79c48cc | 2018-01-14 12:39:00 +0200 | [diff] [blame] | 2845 | nvme_reset_ctrl_sync(&dev->ctrl); |
Keith Busch | f0d54a5 | 2014-05-02 10:40:43 -0600 | [diff] [blame] | 2846 | } |
| 2847 | |
Keith Busch | 09ece14 | 2014-01-27 11:29:40 -0500 | [diff] [blame] | 2848 | static void nvme_shutdown(struct pci_dev *pdev) |
| 2849 | { |
| 2850 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2851 | nvme_dev_disable(dev, true); |
Keith Busch | 09ece14 | 2014-01-27 11:29:40 -0500 | [diff] [blame] | 2852 | } |
| 2853 | |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2854 | /* |
| 2855 | * The driver's remove may be called on a device in a partially initialized |
| 2856 | * state. This function must not have any dependencies on the device state in |
| 2857 | * order to proceed. |
| 2858 | */ |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 2859 | static void nvme_remove(struct pci_dev *pdev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2860 | { |
| 2861 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2862 | |
Christoph Hellwig | bb8d261 | 2016-04-26 13:51:57 +0200 | [diff] [blame] | 2863 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2864 | pci_set_drvdata(pdev, NULL); |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2865 | |
Keith Busch | 6db28ed | 2017-02-10 18:15:49 -0500 | [diff] [blame] | 2866 | if (!pci_device_is_present(pdev)) { |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2867 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); |
Keith Busch | 1d39e69 | 2018-06-06 08:13:08 -0600 | [diff] [blame] | 2868 | nvme_dev_disable(dev, true); |
Keith Busch | cb4bfda | 2018-10-15 10:19:06 -0600 | [diff] [blame] | 2869 | nvme_dev_remove_admin(dev); |
Keith Busch | 6db28ed | 2017-02-10 18:15:49 -0500 | [diff] [blame] | 2870 | } |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2871 | |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2872 | flush_work(&dev->ctrl.reset_work); |
Sagi Grimberg | d09f2b4 | 2017-07-02 10:56:43 +0300 | [diff] [blame] | 2873 | nvme_stop_ctrl(&dev->ctrl); |
| 2874 | nvme_remove_namespaces(&dev->ctrl); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2875 | nvme_dev_disable(dev, true); |
Keith Busch | 9fe5c59 | 2018-10-31 13:15:29 -0600 | [diff] [blame] | 2876 | nvme_release_cmb(dev); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2877 | nvme_free_host_mem(dev); |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2878 | nvme_dev_remove_admin(dev); |
| 2879 | nvme_free_queues(dev, 0); |
Sagi Grimberg | d09f2b4 | 2017-07-02 10:56:43 +0300 | [diff] [blame] | 2880 | nvme_uninit_ctrl(&dev->ctrl); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2881 | nvme_release_prp_pools(dev); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2882 | nvme_dev_unmap(dev); |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2883 | nvme_put_ctrl(&dev->ctrl); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2884 | } |
| 2885 | |
Jingoo Han | 671a601 | 2014-02-13 11:19:14 +0900 | [diff] [blame] | 2886 | #ifdef CONFIG_PM_SLEEP |
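| | /*
| |  * Suspend shuts the controller down completely; resume schedules a full
| |  * controller reset to bring it back up.
| |  */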
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 2887 | static int nvme_suspend(struct device *dev) |
| 2888 | { |
| 2889 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2890 | struct nvme_dev *ndev = pci_get_drvdata(pdev); |
| 2891 | |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2892 | nvme_dev_disable(ndev, true); |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 2893 | return 0; |
| 2894 | } |
| 2895 | |
| 2896 | static int nvme_resume(struct device *dev) |
| 2897 | { |
| 2898 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2899 | struct nvme_dev *ndev = pci_get_drvdata(pdev); |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 2900 | |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2901 | nvme_reset_ctrl(&ndev->ctrl); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2902 | return 0; |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 2903 | } |
Jingoo Han | 671a601 | 2014-02-13 11:19:14 +0900 | [diff] [blame] | 2904 | #endif |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 2905 | |
| 2906 | static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2907 | |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2908 | static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, |
| 2909 | pci_channel_state_t state) |
| 2910 | { |
| 2911 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
| 2912 | |
| 2913 | /* |
| 2914 | * A frozen channel requires a reset. When detected, this method will |
 | 2915 | 	 * shut down the controller to quiesce it. The controller will be restarted
 | 2916 | 	 * after the slot reset through the driver's slot_reset callback.
| 2917 | */ |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2918 | switch (state) { |
| 2919 | case pci_channel_io_normal: |
| 2920 | return PCI_ERS_RESULT_CAN_RECOVER; |
| 2921 | case pci_channel_io_frozen: |
Keith Busch | d011fb3 | 2016-04-04 15:07:41 -0600 | [diff] [blame] | 2922 | dev_warn(dev->ctrl.device, |
| 2923 | "frozen state error detected, reset controller\n"); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2924 | nvme_dev_disable(dev, false); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2925 | return PCI_ERS_RESULT_NEED_RESET; |
| 2926 | case pci_channel_io_perm_failure: |
Keith Busch | d011fb3 | 2016-04-04 15:07:41 -0600 | [diff] [blame] | 2927 | dev_warn(dev->ctrl.device, |
| 2928 | "failure state error detected, request disconnect\n"); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2929 | return PCI_ERS_RESULT_DISCONNECT; |
| 2930 | } |
| 2931 | return PCI_ERS_RESULT_NEED_RESET; |
| 2932 | } |
| 2933 | |
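| | /*
| |  * Called after the PCI core has reset the slot: restore config space and
| |  * schedule a controller reset.  Returning RECOVERED lets normal operation
| |  * resume once the reset work finishes.
| |  */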
| 2934 | static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) |
| 2935 | { |
| 2936 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
| 2937 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 2938 | dev_info(dev->ctrl.device, "restart after slot reset\n"); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2939 | pci_restore_state(pdev); |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2940 | nvme_reset_ctrl(&dev->ctrl); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2941 | return PCI_ERS_RESULT_RECOVERED; |
| 2942 | } |
| 2943 | |
| 2944 | static void nvme_error_resume(struct pci_dev *pdev) |
| 2945 | { |
Keith Busch | 72cd4cc | 2018-05-24 16:16:04 -0600 | [diff] [blame] | 2946 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
| 2947 | |
| 2948 | flush_work(&dev->ctrl.reset_work); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2949 | } |
| 2950 | |
Stephen Hemminger | 1d35203 | 2012-09-07 09:33:17 -0700 | [diff] [blame] | 2951 | static const struct pci_error_handlers nvme_err_handler = { |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2952 | .error_detected = nvme_error_detected, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2953 | .slot_reset = nvme_slot_reset, |
| 2954 | .resume = nvme_error_resume, |
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 2955 | .reset_prepare = nvme_reset_prepare, |
| 2956 | .reset_done = nvme_reset_done, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2957 | }; |
| 2958 | |
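| | /*
| |  * PCI IDs bound by this driver.  Entries are matched in order and
| |  * .driver_data carries the NVME_QUIRK_* flags applied to the matching
| |  * controller.  A hypothetical new entry would look like:
| |  *
| |  *	{ PCI_DEVICE(0xabcd, 0x1234),		hypothetical vendor/device
| |  *	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
| |  */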
Matthew Wilcox | 6eb0d69 | 2014-03-24 10:11:22 -0400 | [diff] [blame] | 2959 | static const struct pci_device_id nvme_id_table[] = { |
Christoph Hellwig | 106198e | 2015-11-26 10:07:41 +0100 | [diff] [blame] | 2960 | { PCI_VDEVICE(INTEL, 0x0953), |
Keith Busch | 08095e7 | 2016-03-04 13:15:17 -0700 | [diff] [blame] | 2961 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
Christoph Hellwig | e850fd1 | 2017-04-05 19:21:13 +0200 | [diff] [blame] | 2962 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
Keith Busch | 99466e7 | 2016-05-02 15:14:24 -0600 | [diff] [blame] | 2963 | { PCI_VDEVICE(INTEL, 0x0a53), |
| 2964 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
Christoph Hellwig | e850fd1 | 2017-04-05 19:21:13 +0200 | [diff] [blame] | 2965 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
Keith Busch | 99466e7 | 2016-05-02 15:14:24 -0600 | [diff] [blame] | 2966 | { PCI_VDEVICE(INTEL, 0x0a54), |
| 2967 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
Christoph Hellwig | e850fd1 | 2017-04-05 19:21:13 +0200 | [diff] [blame] | 2968 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
David Wayne Fugate | f99cb7af | 2017-07-10 12:39:59 -0600 | [diff] [blame] | 2969 | { PCI_VDEVICE(INTEL, 0x0a55), |
| 2970 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
| 2971 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
Andy Lutomirski | 50af47d | 2017-05-24 15:06:31 -0700 | [diff] [blame] | 2972 | { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ |
Jens Axboe | 9abd68e | 2018-05-08 10:25:15 -0600 | [diff] [blame] | 2973 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS | |
| 2974 | NVME_QUIRK_MEDIUM_PRIO_SQ }, |
James Dingwall | 6299358 | 2019-01-08 10:20:51 -0700 | [diff] [blame] | 2975 | { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ |
| 2976 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2977 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ |
| 2978 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, |
Micah Parrish | 0302ae6 | 2018-04-12 13:25:25 -0600 | [diff] [blame] | 2979 | { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ |
| 2980 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Guilherme G. Piccoli | 54adc01 | 2016-06-14 18:22:41 -0300 | [diff] [blame] | 2981 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ |
| 2982 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Jeff Lien | 8c97eec | 2017-11-21 10:44:37 -0600 | [diff] [blame] | 2983 | { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ |
| 2984 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Wenbo Wang | 015282c | 2016-09-08 12:12:11 -0400 | [diff] [blame] | 2985 | { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ |
| 2986 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Martin K. Petersen | d554b5e | 2017-06-27 22:27:57 -0400 | [diff] [blame] | 2987 | { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ |
| 2988 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
| 2989 | { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ |
| 2990 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Christoph Hellwig | 608cc4b | 2017-09-06 11:45:24 +0200 | [diff] [blame] | 2991 | 	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LightNVM qemu device */
| 2992 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
| 2993 | { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ |
| 2994 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
Wei Xu | ea48e87 | 2018-04-26 14:59:19 -0600 | [diff] [blame] | 2995 | { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ |
| 2996 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2997 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
Stephan Günther | c74dc78 | 2015-11-04 00:49:45 +0100 | [diff] [blame] | 2998 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, |
Daniel Roschka | 124298b | 2017-02-22 15:17:29 -0700 | [diff] [blame] | 2999 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3000 | { 0, } |
| 3001 | }; |
| 3002 | MODULE_DEVICE_TABLE(pci, nvme_id_table); |
| 3003 | |
| 3004 | static struct pci_driver nvme_driver = { |
| 3005 | .name = "nvme", |
| 3006 | .id_table = nvme_id_table, |
| 3007 | .probe = nvme_probe, |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 3008 | .remove = nvme_remove, |
Keith Busch | 09ece14 | 2014-01-27 11:29:40 -0500 | [diff] [blame] | 3009 | .shutdown = nvme_shutdown, |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 3010 | .driver = { |
| 3011 | .pm = &nvme_dev_pm_ops, |
| 3012 | }, |
Alexander Duyck | 74d986a | 2018-04-24 16:47:27 -0500 | [diff] [blame] | 3013 | .sriov_configure = pci_sriov_configure_simple, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3014 | .err_handler = &nvme_err_handler, |
| 3015 | }; |
| 3016 | |
| 3017 | static int __init nvme_init(void) |
| 3018 | { |
Sagi Grimberg | 9a6327d | 2017-06-07 20:31:55 +0200 | [diff] [blame] | 3019 | return pci_register_driver(&nvme_driver); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3020 | } |
| 3021 | |
| 3022 | static void __exit nvme_exit(void) |
| 3023 | { |
| 3024 | pci_unregister_driver(&nvme_driver); |
Ming Lei | 03e0f3a | 2017-11-09 19:32:07 +0800 | [diff] [blame] | 3025 | flush_workqueue(nvme_wq); |
Matthew Wilcox | 21bd78b | 2014-05-09 22:42:26 -0400 | [diff] [blame] | 3026 | _nvme_check_size(); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3027 | } |
| 3028 | |
| 3029 | MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); |
| 3030 | MODULE_LICENSE("GPL"); |
Keith Busch | c78b4713 | 2014-11-21 15:16:32 -0700 | [diff] [blame] | 3031 | MODULE_VERSION("1.0"); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3032 | module_init(nvme_init); |
| 3033 | module_exit(nvme_exit); |