// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

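/*
 * Byte sizes of the submission and completion rings.  ->sqes holds the log2
 * of the SQ entry size (assumed to be 6, i.e. the standard 64-byte command
 * format, unless the controller negotiates a different entry size elsewhere
 * in this file), so q_depth << sqes is the ring size in bytes; completion
 * entries are a fixed 16 bytes (sizeof(struct nvme_completion)).
 */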
#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that no command requires
 * an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
	"Use SGLs when the average request segment size is greater than or "
	"equal to this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");

static unsigned int write_queues;
module_param(write_queues, uint, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	int q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

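/*
 * Doorbells (and their shadow-buffer slots) come in pairs per queue: the
 * submission doorbell for queue qid sits at index 2 * qid * stride and the
 * completion doorbell at (2 * qid + 1) * stride, with the stride taken from
 * CAP.DSTRD.  E.g. with a stride of 1, queue 1 uses slots 2 and 3.
 */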
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	/* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	dma_addr_t first_dma;
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t meta_dma;
	struct scatterlist *sg;
};

static unsigned int max_io_queues(void)
{
	return num_possible_cpus() + write_queues + poll_queues;
}

static unsigned int max_queue_count(void)
{
	/* IO queues + admin queue */
	return 1 + max_io_queues();
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return (max_queue_count() * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

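/*
 * Wrap-safe event-index check (the same trick virtio rings use): returns
 * true when event_idx falls in the half-open range (old, new_idx] modulo
 * 2^16.  E.g. old = 10, new_idx = 15, event_idx = 12: (u16)(15 - 12 - 1) = 2
 * < (u16)(15 - 10) = 5, so the controller asked to be notified for this
 * update and an MMIO doorbell write is required.
 */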
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
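/*
 * Worked example for the arithmetic above, assuming 4 KiB pages: a 128 KiB
 * I/O yields nprps = DIV_ROUND_UP(131072 + 4096, 4096) = 33 entries of
 * 8 bytes each.  Each PRP page holds PAGE_SIZE / 8 - 1 usable entries, as
 * the last slot chains to the next page, hence the PAGE_SIZE - 8 divisor:
 * DIV_ROUND_UP(8 * 33, 4088) = 1 page here.
 */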

/*
 * Calculates the number of pages needed for the SGL segments.  For example
 * a 4k page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;

	nvme_req(req)->ctrl = &dev->ctrl;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) don't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping.
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 * @write_sq: whether to write to the SQ doorbell
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

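/*
 * Decide whether to describe a request with an SGL instead of a PRP list.
 * The controller must advertise SGL support for the NVM command set in the
 * Identify Controller SGLS field (the two bits tested below), the admin
 * queue (qid 0) must stick to PRPs, and the average segment size has to
 * reach sgl_threshold: large segments make SGLs cheaper, since one
 * descriptor covers what would otherwise take one PRP entry per page.
 */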
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
	int i;

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
				    rq_dma_dir(req));
	else
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			      dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	mempool_free(iod->sg, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

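/*
 * Build the PRP list for a request.  PRP1 may point into the middle of a
 * device page; every subsequent pointer must be page aligned.  Transfers
 * that fit in two pages use prp1/prp2 directly; anything larger gets a
 * chained list of PRP pages from the small (256 B) or full-page dma_pool,
 * with the last slot of each page pointing at the next one.
 */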
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return BLK_STS_RESOURCE;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

	return BLK_STS_OK;

bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
	     "Invalid SGL for payload:%d nents:%d\n",
	     blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

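/*
 * Fast path for requests with a single physical segment: when the bio_vec
 * fits within two device pages it is described by prp1 (plus prp2 if it
 * crosses a page boundary) directly, skipping the scatterlist and dma_pool
 * allocations entirely.
 */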
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
	unsigned int first_prp_len = dev->ctrl.page_size - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return 0;
}

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return 0;
}

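/*
 * Map the data of a request for DMA.  Single-segment requests take one of
 * the simple paths above; everything else goes through a scatterlist from
 * the iod mempool and is described to the controller with either a PRP
 * list or an SGL, as chosen by nvme_pci_use_sgls().
 */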
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 794 | static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, |
Christoph Hellwig | b131c61 | 2017-01-13 12:29:12 +0100 | [diff] [blame] | 795 | struct nvme_command *cmnd) |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 796 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 797 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
Christoph Hellwig | 70479b7 | 2019-03-05 05:59:02 -0700 | [diff] [blame] | 798 | blk_status_t ret = BLK_STS_RESOURCE; |
Christoph Hellwig | b0f2853 | 2018-01-17 22:04:38 +0100 | [diff] [blame] | 799 | int nr_mapped; |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 800 | |
Christoph Hellwig | dff824b | 2019-03-05 05:49:34 -0700 | [diff] [blame] | 801 | if (blk_rq_nr_phys_segments(req) == 1) { |
| 802 | struct bio_vec bv = req_bvec(req); |
| 803 | |
| 804 | if (!is_pci_p2pdma_page(bv.bv_page)) { |
| 805 | if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2) |
| 806 | return nvme_setup_prp_simple(dev, req, |
| 807 | &cmnd->rw, &bv); |
Christoph Hellwig | 2979105 | 2019-03-05 05:54:18 -0700 | [diff] [blame] | 808 | |
| 809 | if (iod->nvmeq->qid && |
| 810 | dev->ctrl.sgls & ((1 << 0) | (1 << 1))) |
| 811 | return nvme_setup_sgl_simple(dev, req, |
| 812 | &cmnd->rw, &bv); |
Christoph Hellwig | dff824b | 2019-03-05 05:49:34 -0700 | [diff] [blame] | 813 | } |
| 814 | } |
| 815 | |
| 816 | iod->dma_len = 0; |
Christoph Hellwig | d43f1cc | 2019-03-05 05:46:58 -0700 | [diff] [blame] | 817 | iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); |
| 818 | if (!iod->sg) |
| 819 | return BLK_STS_RESOURCE; |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 820 | sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); |
Christoph Hellwig | 70479b7 | 2019-03-05 05:59:02 -0700 | [diff] [blame] | 821 | iod->nents = blk_rq_map_sg(req->q, req, iod->sg); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 822 | if (!iod->nents) |
| 823 | goto out; |
| 824 | |
Logan Gunthorpe | e0596ab | 2018-10-04 15:27:44 -0600 | [diff] [blame] | 825 | if (is_pci_p2pdma_page(sg_page(iod->sg))) |
Logan Gunthorpe | 2b9f4bb | 2019-08-12 11:30:42 -0600 | [diff] [blame] | 826 | nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, |
| 827 | iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); |
Logan Gunthorpe | e0596ab | 2018-10-04 15:27:44 -0600 | [diff] [blame] | 828 | else |
| 829 | nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, |
Christoph Hellwig | 70479b7 | 2019-03-05 05:59:02 -0700 | [diff] [blame] | 830 | rq_dma_dir(req), DMA_ATTR_NO_WARN); |
Christoph Hellwig | b0f2853 | 2018-01-17 22:04:38 +0100 | [diff] [blame] | 831 | if (!nr_mapped) |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 832 | goto out; |
| 833 | |
Christoph Hellwig | 70479b7 | 2019-03-05 05:59:02 -0700 | [diff] [blame] | 834 | iod->use_sgl = nvme_pci_use_sgls(dev, req); |
Minwoo Im | 955b1b5 | 2017-12-20 16:30:50 +0900 | [diff] [blame] | 835 | if (iod->use_sgl) |
Christoph Hellwig | b0f2853 | 2018-01-17 22:04:38 +0100 | [diff] [blame] | 836 | ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); |
Chaitanya Kulkarni | a7a7cbe | 2017-10-16 18:24:20 -0700 | [diff] [blame] | 837 | else |
| 838 | ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 839 | out: |
Christoph Hellwig | 4aedb70 | 2019-03-03 09:46:28 -0700 | [diff] [blame] | 840 | if (ret != BLK_STS_OK) |
| 841 | nvme_unmap_data(dev, req); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 842 | return ret; |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 843 | } |
| 844 | |
Christoph Hellwig | 4aedb70 | 2019-03-03 09:46:28 -0700 | [diff] [blame] | 845 | static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, |
| 846 | struct nvme_command *cmnd) |
| 847 | { |
| 848 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
| 849 | |
| 850 | iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), |
| 851 | rq_dma_dir(req), 0); |
| 852 | if (dma_mapping_error(dev->dev, iod->meta_dma)) |
| 853 | return BLK_STS_IOERR; |
| 854 | cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); |
| 855 | return 0; |
| 856 | } |
| 857 | |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 858 | /* |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 859 | * NOTE: ns is NULL when called on the admin queue. |
| 860 | */ |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 861 | static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 862 | const struct blk_mq_queue_data *bd) |
Keith Busch | 53562be | 2014-04-29 11:41:29 -0600 | [diff] [blame] | 863 | { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 864 | struct nvme_ns *ns = hctx->queue->queuedata; |
| 865 | struct nvme_queue *nvmeq = hctx->driver_data; |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 866 | struct nvme_dev *dev = nvmeq->dev; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 867 | struct request *req = bd->rq; |
Christoph Hellwig | 9b04811 | 2019-03-03 08:04:01 -0700 | [diff] [blame] | 868 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 869 | struct nvme_command cmnd; |
Christoph Hellwig | ebe6d87 | 2017-06-12 18:36:32 +0200 | [diff] [blame] | 870 | blk_status_t ret; |
Keith Busch | e1e5e56 | 2015-02-19 13:39:03 -0700 | [diff] [blame] | 871 | |
Christoph Hellwig | 9b04811 | 2019-03-03 08:04:01 -0700 | [diff] [blame] | 872 | iod->aborted = 0; |
| 873 | iod->npages = -1; |
| 874 | iod->nents = 0; |
| 875 | |
Jens Axboe | d1f06f4 | 2018-05-17 18:31:49 +0200 | [diff] [blame] | 876 | /* |
| 877 | * We should not need to do this, but we're still using this to |
| 878 | * ensure we can drain requests on a dying queue. |
| 879 | */ |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 880 | if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) |
Jens Axboe | d1f06f4 | 2018-05-17 18:31:49 +0200 | [diff] [blame] | 881 | return BLK_STS_IOERR; |
| 882 | |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 883 | ret = nvme_setup_cmd(ns, req, &cmnd); |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 884 | if (ret) |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 885 | return ret; |
Keith Busch | edd10d3 | 2014-04-03 16:45:23 -0600 | [diff] [blame] | 886 | |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 887 | if (blk_rq_nr_phys_segments(req)) { |
Christoph Hellwig | b131c61 | 2017-01-13 12:29:12 +0100 | [diff] [blame] | 888 | ret = nvme_map_data(dev, req, &cmnd); |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 889 | if (ret) |
Christoph Hellwig | 9b04811 | 2019-03-03 08:04:01 -0700 | [diff] [blame] | 890 | goto out_free_cmd; |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 891 | } |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 892 | |
Christoph Hellwig | 4aedb70 | 2019-03-03 09:46:28 -0700 | [diff] [blame] | 893 | if (blk_integrity_rq(req)) { |
| 894 | ret = nvme_map_metadata(dev, req, &cmnd); |
| 895 | if (ret) |
| 896 | goto out_unmap_data; |
| 897 | } |
| 898 | |
Christoph Hellwig | aae239e | 2015-11-26 12:59:50 +0100 | [diff] [blame] | 899 | blk_mq_start_request(req); |
Jens Axboe | 04f3eaf | 2018-11-29 10:02:29 -0700 | [diff] [blame] | 900 | nvme_submit_cmd(nvmeq, &cmnd, bd->last); |
Christoph Hellwig | fc17b65 | 2017-06-03 09:38:05 +0200 | [diff] [blame] | 901 | return BLK_STS_OK; |
Christoph Hellwig | 4aedb70 | 2019-03-03 09:46:28 -0700 | [diff] [blame] | 902 | out_unmap_data: |
| 903 | nvme_unmap_data(dev, req); |
Christoph Hellwig | f9d03f9 | 2016-12-08 15:20:32 -0700 | [diff] [blame] | 904 | out_free_cmd: |
| 905 | nvme_cleanup_cmd(req); |
Christoph Hellwig | ba1ca37 | 2015-10-16 07:58:38 +0200 | [diff] [blame] | 906 | return ret; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 907 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 908 | |
Christoph Hellwig | 77f02a7 | 2017-03-30 13:41:32 +0200 | [diff] [blame] | 909 | static void nvme_pci_complete_rq(struct request *req) |
Christoph Hellwig | eee417b | 2015-11-26 13:03:13 +0100 | [diff] [blame] | 910 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 911 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
Christoph Hellwig | 4aedb70 | 2019-03-03 09:46:28 -0700 | [diff] [blame] | 912 | struct nvme_dev *dev = iod->nvmeq->dev; |
Christoph Hellwig | eee417b | 2015-11-26 13:03:13 +0100 | [diff] [blame] | 913 | |
Christoph Hellwig | 4aedb70 | 2019-03-03 09:46:28 -0700 | [diff] [blame] | 914 | if (blk_integrity_rq(req)) |
| 915 | dma_unmap_page(dev->dev, iod->meta_dma, |
| 916 | rq_integrity_vec(req)->bv_len, rq_data_dir(req)); |
Christoph Hellwig | b15c592 | 2019-03-03 08:52:21 -0700 | [diff] [blame] | 917 | if (blk_rq_nr_phys_segments(req)) |
Christoph Hellwig | 4aedb70 | 2019-03-03 09:46:28 -0700 | [diff] [blame] | 918 | nvme_unmap_data(dev, req); |
Christoph Hellwig | 77f02a7 | 2017-03-30 13:41:32 +0200 | [diff] [blame] | 919 | nvme_complete_rq(req); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 920 | } |
| 921 | |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 922 | /* We read the CQE phase first to check if the rest of the entry is valid */ |
Christoph Hellwig | 750dde4 | 2018-05-18 08:37:04 -0600 | [diff] [blame] | 923 | static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 924 | { |
Christoph Hellwig | 750dde4 | 2018-05-18 08:37:04 -0600 | [diff] [blame] | 925 | return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) == |
| 926 | nvmeq->cq_phase; |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 927 | } |
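/*
 * Illustrative sketch (not part of pci.c): bit 0 of the CQE status is
 * the phase tag, which the controller inverts on every pass around the
 * completion ring. The host keeps its own expected phase, so it can
 * spot a freshly written entry without sharing an index with the
 * device. A toy model using hypothetical types:
 */
#if 0	/* example only, never compiled */
struct toy_cqe { u16 status; };		/* bit 0 = phase tag */
struct toy_cq {
	struct toy_cqe *cqes;
	u16 head, depth;
	u16 phase;			/* starts at 1, like cq_phase */
};

static bool toy_cqe_pending(struct toy_cq *cq)
{
	return (cq->cqes[cq->head].status & 1) == cq->phase;
}
#endif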
| 928 | |
Sagi Grimberg | eb281c8 | 2017-06-18 17:28:07 +0300 | [diff] [blame] | 929 | static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 930 | { |
Sagi Grimberg | eb281c8 | 2017-06-18 17:28:07 +0300 | [diff] [blame] | 931 | u16 head = nvmeq->cq_head; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 932 | |
Keith Busch | 397c699 | 2018-06-06 08:13:05 -0600 | [diff] [blame] | 933 | if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, |
| 934 | nvmeq->dbbuf_cq_ei)) |
| 935 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); |
Sagi Grimberg | eb281c8 | 2017-06-18 17:28:07 +0300 | [diff] [blame] | 936 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 937 | |
Christoph Hellwig | cfa2735 | 2020-01-30 19:40:24 +0100 | [diff] [blame] | 938 | static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) |
| 939 | { |
| 940 | if (!nvmeq->qid) |
| 941 | return nvmeq->dev->admin_tagset.tags[0]; |
| 942 | return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; |
| 943 | } |
| 944 | |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 945 | static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 946 | { |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 947 | volatile struct nvme_completion *cqe = &nvmeq->cqes[idx]; |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 948 | struct request *req; |
| 949 | |
| 950 | if (unlikely(cqe->command_id >= nvmeq->q_depth)) { |
| 951 | dev_warn(nvmeq->dev->ctrl.device, |
| 952 | "invalid id %d completed on queue %d\n", |
| 953 | cqe->command_id, le16_to_cpu(cqe->sq_id)); |
| 954 | return; |
| 955 | } |
| 956 | |
| 957 | /* |
| 958 | * AEN requests are special as they don't time out and can |
| 959 | * survive any kind of queue freeze and often don't respond to |
| 960 | * aborts. We don't even bother to allocate a struct request |
| 961 | * for them but rather special case them here. |
| 962 | */ |
Israel Rukshin | 58a8df6 | 2019-10-13 19:57:31 +0300 | [diff] [blame] | 963 | if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) { |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 964 | nvme_complete_async_event(&nvmeq->dev->ctrl, |
| 965 | cqe->status, &cqe->result); |
| 966 | return; |
| 967 | } |
| 968 | |
Christoph Hellwig | cfa2735 | 2020-01-30 19:40:24 +0100 | [diff] [blame] | 969 | req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); |
yupeng | 604c01d | 2018-12-18 17:59:53 +0100 | [diff] [blame] | 970 | trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); |
Sagi Grimberg | 83a12fb | 2017-06-18 17:28:08 +0300 | [diff] [blame] | 971 | nvme_end_request(req, cqe->status, cqe->result); |
| 972 | } |
| 973 | |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 974 | static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 975 | { |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 976 | while (start != end) { |
| 977 | nvme_handle_cqe(nvmeq, start); |
| 978 | if (++start == nvmeq->q_depth) |
| 979 | start = 0; |
Sagi Grimberg | 920d13a | 2017-06-18 17:28:09 +0300 | [diff] [blame] | 980 | } |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 981 | } |
| 982 | |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 983 | static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 984 | { |
Alexey Dobriyan | e2a366a | 2020-02-28 21:45:19 +0300 | [diff] [blame^] | 985 | if (++nvmeq->cq_head == nvmeq->q_depth) { |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 986 | nvmeq->cq_head = 0; |
Alexey Dobriyan | e2a366a | 2020-02-28 21:45:19 +0300 | [diff] [blame^] | 987 | nvmeq->cq_phase ^= 1; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 988 | } |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 989 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 990 | |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 991 | static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start, |
| 992 | u16 *end, unsigned int tag) |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 993 | { |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 994 | int found = 0; |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 995 | |
| 996 | *start = nvmeq->cq_head; |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 997 | while (nvme_cqe_pending(nvmeq)) { |
| 998 | if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag) |
| 999 | found++; |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1000 | nvme_update_cq_head(nvmeq); |
| 1001 | } |
| 1002 | *end = nvmeq->cq_head; |
| 1003 | |
| 1004 | if (*start != *end) |
Sagi Grimberg | 920d13a | 2017-06-18 17:28:09 +0300 | [diff] [blame] | 1005 | nvme_ring_cq_doorbell(nvmeq); |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1006 | return found; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1007 | } |
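/*
 * Illustrative sketch (not part of pci.c): nvme_process_cq() only
 * records the [start, end) window of fresh entries and rings the
 * doorbell; nvme_complete_cqes() walks the window afterwards. The toy
 * consumer below (reusing the hypothetical toy_cq from the sketch
 * further up) shows the wrap-and-flip bookkeeping in one place:
 */
#if 0	/* example only, never compiled */
static int toy_drain(struct toy_cq *cq)
{
	int found = 0;

	while (toy_cqe_pending(cq)) {
		found++;
		if (++cq->head == cq->depth) {
			cq->head = 0;	/* wrap the ring ... */
			cq->phase ^= 1;	/* ... and flip the phase */
		}
	}
	return found;
}
#endif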
| 1008 | |
| 1009 | static irqreturn_t nvme_irq(int irq, void *data) |
| 1010 | { |
Matthew Wilcox | 58ffacb | 2011-02-06 07:28:06 -0500 | [diff] [blame] | 1011 | struct nvme_queue *nvmeq = data; |
Jens Axboe | 68fa9db | 2018-05-21 08:41:52 -0600 | [diff] [blame] | 1012 | irqreturn_t ret = IRQ_NONE; |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1013 | u16 start, end; |
| 1014 | |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1015 | /* |
| 1016 | * The rmb/wmb pair ensures we see all updates from a previous run of |
| 1017 | * the irq handler, even if that was on another CPU. |
| 1018 | */ |
| 1019 | rmb(); |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1020 | nvme_process_cq(nvmeq, &start, &end, -1); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1021 | wmb(); |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1022 | |
Jens Axboe | 68fa9db | 2018-05-21 08:41:52 -0600 | [diff] [blame] | 1023 | if (start != end) { |
| 1024 | nvme_complete_cqes(nvmeq, start, end); |
| 1025 | return IRQ_HANDLED; |
| 1026 | } |
| 1027 | |
| 1028 | return ret; |
Matthew Wilcox | 58ffacb | 2011-02-06 07:28:06 -0500 | [diff] [blame] | 1029 | } |
| 1030 | |
| 1031 | static irqreturn_t nvme_irq_check(int irq, void *data) |
| 1032 | { |
| 1033 | struct nvme_queue *nvmeq = data; |
Christoph Hellwig | 750dde4 | 2018-05-18 08:37:04 -0600 | [diff] [blame] | 1034 | if (nvme_cqe_pending(nvmeq)) |
Marta Rybczynska | d783e0b | 2016-03-22 16:02:06 +0100 | [diff] [blame] | 1035 | return IRQ_WAKE_THREAD; |
| 1036 | return IRQ_NONE; |
Matthew Wilcox | 58ffacb | 2011-02-06 07:28:06 -0500 | [diff] [blame] | 1037 | } |
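/*
 * Illustrative sketch (not part of pci.c): with use_threaded_interrupts
 * the hard handler (nvme_irq_check() above) only peeks at the phase bit
 * and defers the actual completion work to a kernel thread by returning
 * IRQ_WAKE_THREAD. A generic, hypothetical example of that split; the
 * driver's real registration lives in queue_request_irq() further down.
 */
#if 0	/* example only, never compiled */
static irqreturn_t example_hard(int irq, void *data)
{
	/* Cheap check in hard-irq context. */
	return example_pending(data) ? IRQ_WAKE_THREAD : IRQ_NONE;
}

static irqreturn_t example_thread(int irq, void *data)
{
	example_do_completions(data);	/* heavy work, process context */
	return IRQ_HANDLED;
}

/* request_threaded_irq(irq, example_hard, example_thread, 0, "ex", data); */
#endif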
| 1038 | |
Christoph Hellwig | 0b2a8a9 | 2018-12-02 17:46:20 +0100 | [diff] [blame] | 1039 | /* |
| 1040 | * Poll for completions on any queue, including those not dedicated to polling.
| 1041 | * Can be called from any context. |
| 1042 | */ |
| 1043 | static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag) |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1044 | { |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1045 | struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1046 | u16 start, end; |
Jens Axboe | 1052b8a | 2018-11-26 08:21:49 -0700 | [diff] [blame] | 1047 | int found; |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1048 | |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1049 | /* |
| 1050 | * For a poll queue we need to protect against the polling thread
| 1051 | * by taking the CQ lock. For a normal interrupt driven queue we
| 1052 | * have to disable the interrupt to avoid racing with the handler.
| 1053 | */ |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1054 | if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) { |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1055 | spin_lock(&nvmeq->cq_poll_lock); |
Christoph Hellwig | 91a509f | 2018-12-13 09:48:00 +0100 | [diff] [blame] | 1056 | found = nvme_process_cq(nvmeq, &start, &end, tag); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1057 | spin_unlock(&nvmeq->cq_poll_lock); |
Christoph Hellwig | 91a509f | 2018-12-13 09:48:00 +0100 | [diff] [blame] | 1058 | } else { |
| 1059 | disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); |
| 1060 | found = nvme_process_cq(nvmeq, &start, &end, tag); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1061 | enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); |
Christoph Hellwig | 91a509f | 2018-12-13 09:48:00 +0100 | [diff] [blame] | 1062 | } |
Sagi Grimberg | 442e19b | 2017-06-18 17:28:10 +0300 | [diff] [blame] | 1063 | |
Jens Axboe | 5cb525c | 2018-05-17 18:31:50 +0200 | [diff] [blame] | 1064 | nvme_complete_cqes(nvmeq, start, end); |
Sagi Grimberg | 442e19b | 2017-06-18 17:28:10 +0300 | [diff] [blame] | 1065 | return found; |
Jens Axboe | a0fa964 | 2015-11-03 20:37:26 -0700 | [diff] [blame] | 1066 | } |
| 1067 | |
Jens Axboe | 9743139 | 2018-11-16 09:48:21 -0700 | [diff] [blame] | 1068 | static int nvme_poll(struct blk_mq_hw_ctx *hctx) |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1069 | { |
| 1070 | struct nvme_queue *nvmeq = hctx->driver_data; |
Jens Axboe | dabcefa | 2018-11-14 09:38:28 -0700 | [diff] [blame] | 1071 | u16 start, end; |
| 1072 | bool found; |
| 1073 | |
| 1074 | if (!nvme_cqe_pending(nvmeq)) |
| 1075 | return 0; |
| 1076 | |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1077 | spin_lock(&nvmeq->cq_poll_lock); |
Jens Axboe | 9743139 | 2018-11-16 09:48:21 -0700 | [diff] [blame] | 1078 | found = nvme_process_cq(nvmeq, &start, &end, -1); |
Bijan Mottahedeh | 9515743 | 2020-02-26 18:53:43 -0800 | [diff] [blame] | 1079 | nvme_complete_cqes(nvmeq, start, end); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1080 | spin_unlock(&nvmeq->cq_poll_lock); |
Jens Axboe | dabcefa | 2018-11-14 09:38:28 -0700 | [diff] [blame] | 1081 | |
Jens Axboe | dabcefa | 2018-11-14 09:38:28 -0700 | [diff] [blame] | 1082 | return found; |
| 1083 | } |
| 1084 | |
Keith Busch | ad22c35 | 2017-11-07 15:13:12 -0700 | [diff] [blame] | 1085 | static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1086 | { |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 1087 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1088 | struct nvme_queue *nvmeq = &dev->queues[0]; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1089 | struct nvme_command c; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1090 | |
| 1091 | memset(&c, 0, sizeof(c)); |
| 1092 | c.common.opcode = nvme_admin_async_event; |
Keith Busch | ad22c35 | 2017-11-07 15:13:12 -0700 | [diff] [blame] | 1093 | c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; |
Jens Axboe | 04f3eaf | 2018-11-29 10:02:29 -0700 | [diff] [blame] | 1094 | nvme_submit_cmd(nvmeq, &c, true); |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1095 | } |
| 1096 | |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1097 | static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) |
| 1098 | { |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1099 | struct nvme_command c; |
| 1100 | |
| 1101 | memset(&c, 0, sizeof(c)); |
| 1102 | c.delete_queue.opcode = opcode; |
| 1103 | c.delete_queue.qid = cpu_to_le16(id); |
| 1104 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1105 | return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1106 | } |
| 1107 | |
| 1108 | static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1109 | struct nvme_queue *nvmeq, s16 vector) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1110 | { |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1111 | struct nvme_command c; |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1112 | int flags = NVME_QUEUE_PHYS_CONTIG; |
| 1113 | |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1114 | if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1115 | flags |= NVME_CQ_IRQ_ENABLED; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1116 | |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1117 | /* |
Minwoo Im | 16772ae | 2017-10-18 22:56:09 +0900 | [diff] [blame] | 1118 | * Note: we (ab)use the fact that the PRP fields survive if no data
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1119 | * is attached to the request. |
| 1120 | */ |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1121 | memset(&c, 0, sizeof(c)); |
| 1122 | c.create_cq.opcode = nvme_admin_create_cq; |
| 1123 | c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); |
| 1124 | c.create_cq.cqid = cpu_to_le16(qid); |
| 1125 | c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); |
| 1126 | c.create_cq.cq_flags = cpu_to_le16(flags); |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1127 | c.create_cq.irq_vector = cpu_to_le16(vector); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1128 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1129 | return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1130 | } |
| 1131 | |
| 1132 | static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, |
| 1133 | struct nvme_queue *nvmeq) |
| 1134 | { |
Jens Axboe | 9abd68e | 2018-05-08 10:25:15 -0600 | [diff] [blame] | 1135 | struct nvme_ctrl *ctrl = &dev->ctrl; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1136 | struct nvme_command c; |
Keith Busch | 81c1cd9 | 2017-04-04 18:18:12 -0400 | [diff] [blame] | 1137 | int flags = NVME_QUEUE_PHYS_CONTIG; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1138 | |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1139 | /* |
Jens Axboe | 9abd68e | 2018-05-08 10:25:15 -0600 | [diff] [blame] | 1140 | * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
| 1141 | * set. Since the URGENT priority value is zero, this makes all
| 1142 | * queues URGENT.
| 1143 | */ |
| 1144 | if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) |
| 1145 | flags |= NVME_SQ_PRIO_MEDIUM; |
| 1146 | |
| 1147 | /* |
Minwoo Im | 16772ae | 2017-10-18 22:56:09 +0900 | [diff] [blame] | 1148 | * Note: we (ab)use the fact that the PRP fields survive if no data
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1149 | * is attached to the request. |
| 1150 | */ |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1151 | memset(&c, 0, sizeof(c)); |
| 1152 | c.create_sq.opcode = nvme_admin_create_sq; |
| 1153 | c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); |
| 1154 | c.create_sq.sqid = cpu_to_le16(qid); |
| 1155 | c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); |
| 1156 | c.create_sq.sq_flags = cpu_to_le16(flags); |
| 1157 | c.create_sq.cqid = cpu_to_le16(qid); |
| 1158 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1159 | return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1160 | } |
| 1161 | |
| 1162 | static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) |
| 1163 | { |
| 1164 | return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); |
| 1165 | } |
| 1166 | |
| 1167 | static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) |
| 1168 | { |
| 1169 | return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); |
| 1170 | } |
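/*
 * Usage sketch (not part of pci.c): the spec requires a submission
 * queue to reference an existing completion queue, so bring-up runs CQ
 * first, SQ second, and teardown runs in the reverse order --
 * nvme_create_queue() further down follows exactly this pattern:
 */
#if 0	/* example only, never compiled */
static int example_bringup(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{
	int ret = adapter_alloc_cq(dev, qid, nvmeq, vector);

	if (ret)
		return ret;
	ret = adapter_alloc_sq(dev, qid, nvmeq);
	if (ret)
		adapter_delete_cq(dev, qid);	/* unwind the CQ */
	return ret;
}
#endif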
| 1171 | |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 1172 | static void abort_endio(struct request *req, blk_status_t error) |
Matthew Wilcox | bc5fc7e | 2011-09-19 17:08:14 -0400 | [diff] [blame] | 1173 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 1174 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
| 1175 | struct nvme_queue *nvmeq = iod->nvmeq; |
Matthew Wilcox | bc5fc7e | 2011-09-19 17:08:14 -0400 | [diff] [blame] | 1176 | |
Christoph Hellwig | 27fa9bc | 2017-04-20 16:02:57 +0200 | [diff] [blame] | 1177 | dev_warn(nvmeq->dev->ctrl.device, |
| 1178 | "Abort status: 0x%x", nvme_req(req)->status); |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1179 | atomic_inc(&nvmeq->dev->ctrl.abort_limit); |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1180 | blk_mq_free_request(req); |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1181 | } |
| 1182 | |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1183 | static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) |
| 1184 | { |
| 1186 | /* If true, indicates loss of adapter communication, possibly by an
| 1187 | * NVMe Subsystem reset.
| 1188 | */
| 1189 | bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); |
| 1190 | |
Jianchao Wang | ad70062 | 2018-01-22 22:03:16 +0800 | [diff] [blame] | 1191 | /* If there is a reset/reinit ongoing, we shouldn't reset again. */ |
| 1192 | switch (dev->ctrl.state) { |
| 1193 | case NVME_CTRL_RESETTING: |
Max Gurtovoy | ad6a0a5 | 2018-01-31 18:31:24 +0200 | [diff] [blame] | 1194 | case NVME_CTRL_CONNECTING: |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1195 | return false; |
Jianchao Wang | ad70062 | 2018-01-22 22:03:16 +0800 | [diff] [blame] | 1196 | default: |
| 1197 | break; |
| 1198 | } |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1199 | |
| 1200 | /* We shouldn't reset unless the controller is in a fatal error state
| 1201 | * _or_ we have lost communication with it.
| 1202 | */ |
| 1203 | if (!(csts & NVME_CSTS_CFS) && !nssro) |
| 1204 | return false; |
| 1205 | |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1206 | return true; |
| 1207 | } |
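/*
 * Worked example (assuming the usual register layout, with CSTS.CFS in
 * bit 1 and CSTS.NSSRO in bit 4): a read of csts == 0x12 means both
 * "controller fatal status" and "subsystem reset occurred" are set, so
 * nvme_should_reset() returns true unless a reset or reconnect is
 * already in flight.
 */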
| 1208 | |
| 1209 | static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) |
| 1210 | { |
| 1211 | /* Read a config register to help see what died. */ |
| 1212 | u16 pci_status; |
| 1213 | int result; |
| 1214 | |
| 1215 | result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, |
| 1216 | &pci_status); |
| 1217 | if (result == PCIBIOS_SUCCESSFUL) |
| 1218 | dev_warn(dev->ctrl.device, |
| 1219 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", |
| 1220 | csts, pci_status); |
| 1221 | else |
| 1222 | dev_warn(dev->ctrl.device, |
| 1223 | "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", |
| 1224 | csts, result); |
| 1225 | } |
| 1226 | |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1227 | static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1228 | { |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 1229 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
| 1230 | struct nvme_queue *nvmeq = iod->nvmeq; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1231 | struct nvme_dev *dev = nvmeq->dev; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1232 | struct request *abort_req; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1233 | struct nvme_command cmd; |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1234 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
| 1235 | |
Wen Xiong | 651438b | 2018-02-15 14:05:10 -0600 | [diff] [blame] | 1236 | /* If the PCI error recovery process is in progress, we cannot reset
| 1237 | * or the recovery mechanism will surely fail.
| 1238 | */ |
| 1239 | mb(); |
| 1240 | if (pci_channel_offline(to_pci_dev(dev->dev))) |
| 1241 | return BLK_EH_RESET_TIMER; |
| 1242 | |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1243 | /* |
| 1244 | * Reset immediately if the controller has failed.
| 1245 | */ |
| 1246 | if (nvme_should_reset(dev, csts)) { |
| 1247 | nvme_warn_reset(dev, csts); |
| 1248 | nvme_dev_disable(dev, false); |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 1249 | nvme_reset_ctrl(&dev->ctrl); |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1250 | return BLK_EH_DONE; |
Keith Busch | b2a0eb1 | 2017-06-07 20:32:50 +0200 | [diff] [blame] | 1251 | } |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1252 | |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1253 | /* |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1254 | * Did we miss an interrupt? |
| 1255 | */ |
Christoph Hellwig | 0b2a8a9 | 2018-12-02 17:46:20 +0100 | [diff] [blame] | 1256 | if (nvme_poll_irqdisable(nvmeq, req->tag)) { |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1257 | dev_warn(dev->ctrl.device, |
| 1258 | "I/O %d QID %d timeout, completion polled\n", |
| 1259 | req->tag, nvmeq->qid); |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1260 | return BLK_EH_DONE; |
Keith Busch | 7776db1 | 2017-02-24 17:59:28 -0500 | [diff] [blame] | 1261 | } |
| 1262 | |
| 1263 | /* |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1264 | * Shutdown immediately if the controller times out while starting. The
| 1265 | * reset work will see the pci device disabled when it gets the forced |
| 1266 | * cancellation error. All outstanding requests are completed on |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1267 | * shutdown, so we return BLK_EH_DONE. |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1268 | */ |
Keith Busch | 4244140 | 2018-02-08 08:55:34 -0700 | [diff] [blame] | 1269 | switch (dev->ctrl.state) { |
| 1270 | case NVME_CTRL_CONNECTING: |
Keith Busch | 2036f72 | 2019-05-14 14:27:53 -0600 | [diff] [blame] | 1271 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); |
| 1272 | /* fall through */ |
| 1273 | case NVME_CTRL_DELETING: |
Keith Busch | b9cac43 | 2018-05-24 14:34:55 -0600 | [diff] [blame] | 1274 | dev_warn_ratelimited(dev->ctrl.device, |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1275 | "I/O %d QID %d timeout, disable controller\n", |
| 1276 | req->tag, nvmeq->qid); |
Keith Busch | 2036f72 | 2019-05-14 14:27:53 -0600 | [diff] [blame] | 1277 | nvme_dev_disable(dev, true); |
Christoph Hellwig | 27fa9bc | 2017-04-20 16:02:57 +0200 | [diff] [blame] | 1278 | nvme_req(req)->flags |= NVME_REQ_CANCELLED; |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1279 | return BLK_EH_DONE; |
Keith Busch | 39a9dd8 | 2019-05-14 14:10:41 -0600 | [diff] [blame] | 1280 | case NVME_CTRL_RESETTING: |
| 1281 | return BLK_EH_RESET_TIMER; |
Keith Busch | 4244140 | 2018-02-08 08:55:34 -0700 | [diff] [blame] | 1282 | default: |
| 1283 | break; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1284 | } |
| 1285 | |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 1286 | /* |
| 1287 | * Shutdown the controller immediately and schedule a reset if the |
| 1288 | * command was already aborted once before and still hasn't been |
| 1289 | * returned to the driver, or if this is the admin queue. |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1290 | */ |
Christoph Hellwig | f4800d6 | 2015-11-28 15:43:10 +0100 | [diff] [blame] | 1291 | if (!nvmeq->qid || iod->aborted) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1292 | dev_warn(dev->ctrl.device, |
Keith Busch | e1569a1 | 2015-11-26 12:11:07 +0100 | [diff] [blame] | 1293 | "I/O %d QID %d timeout, reset controller\n", |
| 1294 | req->tag, nvmeq->qid); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 1295 | nvme_dev_disable(dev, false); |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 1296 | nvme_reset_ctrl(&dev->ctrl); |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1297 | |
Christoph Hellwig | 27fa9bc | 2017-04-20 16:02:57 +0200 | [diff] [blame] | 1298 | nvme_req(req)->flags |= NVME_REQ_CANCELLED; |
Christoph Hellwig | db8c48e | 2018-05-29 15:52:30 +0200 | [diff] [blame] | 1299 | return BLK_EH_DONE; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1300 | } |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1301 | |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1302 | if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { |
| 1303 | atomic_inc(&dev->ctrl.abort_limit); |
| 1304 | return BLK_EH_RESET_TIMER; |
| 1305 | } |
Keith Busch | 7bf7d77 | 2017-01-24 18:07:00 -0500 | [diff] [blame] | 1306 | iod->aborted = 1; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1307 | |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1308 | memset(&cmd, 0, sizeof(cmd)); |
| 1309 | cmd.abort.opcode = nvme_admin_abort_cmd; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1310 | cmd.abort.cid = req->tag; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1311 | cmd.abort.sqid = cpu_to_le16(nvmeq->qid); |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1312 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1313 | dev_warn(nvmeq->dev->ctrl.device, |
| 1314 | "I/O %d QID %d timeout, aborting\n", |
| 1315 | req->tag, nvmeq->qid); |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1316 | |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1317 | abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, |
Christoph Hellwig | eb71f43 | 2016-06-13 16:45:23 +0200 | [diff] [blame] | 1318 | BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); |
Christoph Hellwig | 6bf25d1 | 2015-11-20 09:36:44 +0100 | [diff] [blame] | 1319 | if (IS_ERR(abort_req)) { |
| 1320 | atomic_inc(&dev->ctrl.abort_limit); |
Christoph Hellwig | 31c7c7d | 2015-10-22 14:03:35 +0200 | [diff] [blame] | 1321 | return BLK_EH_RESET_TIMER; |
Christoph Hellwig | 6bf25d1 | 2015-11-20 09:36:44 +0100 | [diff] [blame] | 1322 | } |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1323 | |
Christoph Hellwig | e7a2a87 | 2015-11-16 10:39:48 +0100 | [diff] [blame] | 1324 | abort_req->timeout = ADMIN_TIMEOUT; |
| 1325 | abort_req->end_io_data = NULL; |
| 1326 | blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 1327 | |
Keith Busch | 7a509a6 | 2015-01-07 18:55:53 -0700 | [diff] [blame] | 1328 | /* |
| 1329 | * The aborted req will be completed on receiving the abort req. |
| 1330 | * We enable the timer again. If hit twice, it'll cause a device reset, |
| 1331 | * as the device is then in a faulty state.
| 1332 | */ |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 1333 | return BLK_EH_RESET_TIMER; |
Matthew Wilcox | a09115b | 2012-08-07 15:56:23 -0400 | [diff] [blame] | 1334 | } |
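/*
 * Summary of the escalation ladder above, in order:
 * 1) while PCI error recovery is running, just extend the timer;
 * 2) if CSTS says the controller failed, disable it and schedule a reset;
 * 3) poll the queue -- a completion we merely missed ends the timeout;
 * 4) while connecting or deleting, disable the controller and cancel the
 *    request; while resetting, extend the timer;
 * 5) on the admin queue, or if this request was already aborted once,
 *    disable the controller and schedule a reset;
 * 6) otherwise send a single Abort command and extend the timer.
 */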
| 1335 | |
Keith Busch | f435c28 | 2014-07-07 09:14:42 -0600 | [diff] [blame] | 1336 | static void nvme_free_queue(struct nvme_queue *nvmeq) |
Matthew Wilcox | 9e86677 | 2012-08-03 13:55:56 -0400 | [diff] [blame] | 1337 | { |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1338 | dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), |
Matthew Wilcox | 9e86677 | 2012-08-03 13:55:56 -0400 | [diff] [blame] | 1339 | (void *)nvmeq->cqes, nvmeq->cq_dma_addr); |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1340 | if (!nvmeq->sq_cmds) |
| 1341 | return; |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1342 | |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1343 | if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { |
Keith Busch | 88a041f | 2019-03-08 10:43:11 -0700 | [diff] [blame] | 1344 | pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1345 | nvmeq->sq_cmds, SQ_SIZE(nvmeq)); |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1346 | } else { |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1347 | dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1348 | nvmeq->sq_cmds, nvmeq->sq_dma_addr); |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1349 | } |
Matthew Wilcox | 9e86677 | 2012-08-03 13:55:56 -0400 | [diff] [blame] | 1350 | } |
| 1351 | |
Keith Busch | a1a5ef9 | 2013-12-16 13:50:00 -0500 | [diff] [blame] | 1352 | static void nvme_free_queues(struct nvme_dev *dev, int lowest) |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1353 | { |
| 1354 | int i; |
| 1355 | |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1356 | for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1357 | dev->ctrl.queue_count--; |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1358 | nvme_free_queue(&dev->queues[i]); |
kaoudis | 121c7ad | 2015-01-14 21:01:58 -0700 | [diff] [blame] | 1359 | } |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1360 | } |
| 1361 | |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1362 | /** |
| 1363 | * nvme_suspend_queue - put queue into suspended state |
Bart Van Assche | 40581d1 | 2018-10-08 14:28:43 -0700 | [diff] [blame] | 1364 | * @nvmeq: queue to suspend |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1365 | */ |
| 1366 | static int nvme_suspend_queue(struct nvme_queue *nvmeq) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1367 | { |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1368 | if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) |
Keith Busch | 2b25d98 | 2014-12-22 12:59:04 -0700 | [diff] [blame] | 1369 | return 1; |
Matthew Wilcox | a09115b | 2012-08-07 15:56:23 -0400 | [diff] [blame] | 1370 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1371 | /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ |
Jens Axboe | d1f06f4 | 2018-05-17 18:31:49 +0200 | [diff] [blame] | 1372 | mb(); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1373 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1374 | nvmeq->dev->online_queues--; |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1375 | if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) |
Sagi Grimberg | c81545f | 2017-07-02 15:53:27 +0300 | [diff] [blame] | 1376 | blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1377 | if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) |
| 1378 | pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1379 | return 0; |
| 1380 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1381 | |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 1382 | static void nvme_suspend_io_queues(struct nvme_dev *dev) |
| 1383 | { |
| 1384 | int i; |
| 1385 | |
| 1386 | for (i = dev->ctrl.queue_count - 1; i > 0; i--) |
| 1387 | nvme_suspend_queue(&dev->queues[i]); |
| 1388 | } |
| 1389 | |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 1390 | static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1391 | { |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1392 | struct nvme_queue *nvmeq = &dev->queues[0]; |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 1393 | |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 1394 | if (shutdown) |
| 1395 | nvme_shutdown_ctrl(&dev->ctrl); |
| 1396 | else |
Sagi Grimberg | b5b0504 | 2019-07-22 17:06:54 -0700 | [diff] [blame] | 1397 | nvme_disable_ctrl(&dev->ctrl); |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 1398 | |
Christoph Hellwig | 0b2a8a9 | 2018-12-02 17:46:20 +0100 | [diff] [blame] | 1399 | nvme_poll_irqdisable(nvmeq, -1); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1400 | } |
| 1401 | |
Keith Busch | fa46c6f | 2020-02-13 01:41:05 +0900 | [diff] [blame] | 1402 | /* |
| 1403 | * Called only on a device that has been disabled and after all other threads |
| 1404 | * that can check this device's completion queues have synced. This is the |
| 1405 | * last chance for the driver to see a natural completion before |
| 1406 | * nvme_cancel_request() terminates all incomplete requests. |
| 1407 | */ |
| 1408 | static void nvme_reap_pending_cqes(struct nvme_dev *dev) |
| 1409 | { |
| 1410 | u16 start, end; |
| 1411 | int i; |
| 1412 | |
| 1413 | for (i = dev->ctrl.queue_count - 1; i > 0; i--) { |
| 1414 | nvme_process_cq(&dev->queues[i], &start, &end, -1); |
| 1415 | nvme_complete_cqes(&dev->queues[i], start, end); |
| 1416 | } |
| 1417 | } |
| 1418 | |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1419 | static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, |
| 1420 | int entry_size) |
| 1421 | { |
| 1422 | int q_depth = dev->q_depth; |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1423 | unsigned q_size_aligned = roundup(q_depth * entry_size, |
| 1424 | dev->ctrl.page_size); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1425 | |
| 1426 | if (q_size_aligned * nr_io_queues > dev->cmb_size) { |
Jon Derrick | c45f5c9 | 2015-07-21 15:08:13 -0600 | [diff] [blame] | 1427 | u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1428 | mem_per_q = round_down(mem_per_q, dev->ctrl.page_size); |
Jon Derrick | c45f5c9 | 2015-07-21 15:08:13 -0600 | [diff] [blame] | 1429 | q_depth = div_u64(mem_per_q, entry_size); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1430 | |
| 1431 | /* |
| 1432 | * Ensure the reduced q_depth is above some threshold where it |
| 1433 | * would be better to map queues in system memory with the |
| 1434 | * original depth.
| 1435 | */ |
| 1436 | if (q_depth < 64) |
| 1437 | return -ENOMEM; |
| 1438 | } |
| 1439 | |
| 1440 | return q_depth; |
| 1441 | } |
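/*
 * Worked example with illustrative numbers: a 1 MiB CMB, 32 I/O queues,
 * 64-byte SQ entries, 4 KiB controller pages and a requested depth of
 * 1024 gives q_size_aligned = 64 KiB, and 32 * 64 KiB overflows the
 * CMB. mem_per_q becomes 1 MiB / 32 = 32 KiB (already page aligned), so
 * the depth drops to 32 KiB / 64 = 512 -- above the floor of 64, so the
 * CMB is still worth using.
 */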
| 1442 | |
| 1443 | static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1444 | int qid) |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1445 | { |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1446 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1447 | |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1448 | if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1449 | nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); |
Alan Mikhak | bfac8e9 | 2019-07-08 10:05:11 -0700 | [diff] [blame] | 1450 | if (nvmeq->sq_cmds) { |
| 1451 | nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, |
| 1452 | nvmeq->sq_cmds); |
| 1453 | if (nvmeq->sq_dma_addr) { |
| 1454 | set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); |
| 1455 | return 0; |
| 1456 | } |
| 1457 | |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1458 | pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1459 | } |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1460 | } |
| 1461 | |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1462 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), |
Christoph Hellwig | 6322307 | 2018-12-02 17:46:18 +0100 | [diff] [blame] | 1463 | &nvmeq->sq_dma_addr, GFP_KERNEL); |
Keith Busch | 815c670 | 2018-02-13 05:44:44 -0700 | [diff] [blame] | 1464 | if (!nvmeq->sq_cmds) |
| 1465 | return -ENOMEM; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1466 | return 0; |
| 1467 | } |
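/*
 * Design note (sketch-level summary): placing SQEs in the controller
 * memory buffer via pci_alloc_p2pmem() spares the controller a DMA
 * fetch across the bus for every command. If the p2p allocation or its
 * bus mapping fails, the code quietly falls back to ordinary coherent
 * host memory, which always works but costs the controller that fetch.
 */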
| 1468 | |
Keith Busch | a6ff726 | 2018-04-12 09:16:09 -0600 | [diff] [blame] | 1469 | static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1470 | { |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1471 | struct nvme_queue *nvmeq = &dev->queues[qid]; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1472 | |
Keith Busch | 62314e4 | 2018-01-23 09:16:19 -0700 | [diff] [blame] | 1473 | if (dev->ctrl.queue_count > qid) |
| 1474 | return 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1475 | |
Benjamin Herrenschmidt | c1e0cc7 | 2019-08-07 17:51:20 +1000 | [diff] [blame] | 1476 | nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1477 | nvmeq->q_depth = depth; |
| 1478 | nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), |
Luis Chamberlain | 750afb0 | 2019-01-04 09:23:09 +0100 | [diff] [blame] | 1479 | &nvmeq->cq_dma_addr, GFP_KERNEL); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1480 | if (!nvmeq->cqes) |
| 1481 | goto free_nvmeq; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1482 | |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1483 | if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1484 | goto free_cqdma; |
| 1485 | |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 1486 | nvmeq->dev = dev; |
Jens Axboe | 1ab0cd6 | 2018-05-17 18:31:51 +0200 | [diff] [blame] | 1487 | spin_lock_init(&nvmeq->sq_lock); |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1488 | spin_lock_init(&nvmeq->cq_poll_lock); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1489 | nvmeq->cq_head = 0; |
Matthew Wilcox | 8212346 | 2011-01-20 13:24:06 -0500 | [diff] [blame] | 1490 | nvmeq->cq_phase = 1; |
Haiyan Hu | b80d5cc | 2013-09-10 11:25:37 +0800 | [diff] [blame] | 1491 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; |
Keith Busch | c30341d | 2013-12-10 13:10:38 -0700 | [diff] [blame] | 1492 | nvmeq->qid = qid; |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1493 | dev->ctrl.queue_count++; |
Jon Derrick | 36a7e99 | 2015-05-27 12:26:23 -0600 | [diff] [blame] | 1494 | |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1495 | return 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1496 | |
| 1497 | free_cqdma: |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1498 | dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, |
| 1499 | nvmeq->cq_dma_addr); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1500 | free_nvmeq: |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1501 | return -ENOMEM; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1502 | } |
| 1503 | |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 1504 | static int queue_request_irq(struct nvme_queue *nvmeq) |
Matthew Wilcox | 3001082 | 2011-01-20 09:10:15 -0500 | [diff] [blame] | 1505 | { |
Christoph Hellwig | 0ff199c | 2017-04-13 09:06:43 +0200 | [diff] [blame] | 1506 | struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); |
| 1507 | int nr = nvmeq->dev->ctrl.instance; |
| 1508 | |
| 1509 | if (use_threaded_interrupts) { |
| 1510 | return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, |
| 1511 | nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); |
| 1512 | } else { |
| 1513 | return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, |
| 1514 | NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); |
| 1515 | } |
Matthew Wilcox | 3001082 | 2011-01-20 09:10:15 -0500 | [diff] [blame] | 1516 | } |
| 1517 | |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1518 | static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1519 | { |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1520 | struct nvme_dev *dev = nvmeq->dev; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1521 | |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1522 | nvmeq->sq_tail = 0; |
Jens Axboe | 04f3eaf | 2018-11-29 10:02:29 -0700 | [diff] [blame] | 1523 | nvmeq->last_sq_tail = 0; |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1524 | nvmeq->cq_head = 0; |
| 1525 | nvmeq->cq_phase = 1; |
Haiyan Hu | b80d5cc | 2013-09-10 11:25:37 +0800 | [diff] [blame] | 1526 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; |
Benjamin Herrenschmidt | 8a1d09a | 2019-08-07 17:51:19 +1000 | [diff] [blame] | 1527 | memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); |
Helen Koike | f9f38e3 | 2017-04-10 12:51:07 -0300 | [diff] [blame] | 1528 | nvme_dbbuf_init(dev, nvmeq, qid); |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1529 | dev->online_queues++; |
Christoph Hellwig | 3a7afd8 | 2018-12-02 17:46:23 +0100 | [diff] [blame] | 1530 | wmb(); /* ensure the first interrupt sees the initialization */ |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1531 | } |
| 1532 | |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1533 | static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1534 | { |
| 1535 | struct nvme_dev *dev = nvmeq->dev; |
| 1536 | int result; |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1537 | u16 vector = 0; |
Matthew Wilcox | 3f85d50 | 2011-02-01 08:39:04 -0500 | [diff] [blame] | 1538 | |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 1539 | clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); |
| 1540 | |
Keith Busch | 22b5560 | 2018-04-12 09:16:10 -0600 | [diff] [blame] | 1541 | /* |
| 1542 | * A queue's vector matches the queue identifier unless the controller |
| 1543 | * has only one vector available. |
| 1544 | */ |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1545 | if (!polled) |
| 1546 | vector = dev->num_vecs == 1 ? 0 : qid; |
| 1547 | else |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1548 | set_bit(NVMEQ_POLLED, &nvmeq->flags); |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1549 | |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1550 | result = adapter_alloc_cq(dev, qid, nvmeq, vector); |
Keith Busch | ded4550 | 2018-06-06 08:13:06 -0600 | [diff] [blame] | 1551 | if (result) |
| 1552 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1553 | |
| 1554 | result = adapter_alloc_sq(dev, qid, nvmeq); |
| 1555 | if (result < 0) |
Keith Busch | ded4550 | 2018-06-06 08:13:06 -0600 | [diff] [blame] | 1556 | return result; |
Edmund Nadolski | c80b36c | 2019-11-25 09:06:12 -0700 | [diff] [blame] | 1557 | if (result) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1558 | goto release_cq; |
| 1559 | |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1560 | nvmeq->cq_vector = vector; |
Keith Busch | 161b8be | 2017-09-14 13:54:39 -0400 | [diff] [blame] | 1561 | nvme_init_queue(nvmeq, qid); |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1562 | |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1563 | if (!polled) { |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1564 | result = queue_request_irq(nvmeq); |
| 1565 | if (result < 0) |
| 1566 | goto release_sq; |
| 1567 | } |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1568 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1569 | set_bit(NVMEQ_ENABLED, &nvmeq->flags); |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1570 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1571 | |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1572 | release_sq: |
Jianchao Wang | f25a2df | 2018-02-15 19:13:41 +0800 | [diff] [blame] | 1573 | dev->online_queues--; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1574 | adapter_delete_sq(dev, qid); |
Jianchao Wang | a8e3e0b | 2018-05-24 17:51:33 +0800 | [diff] [blame] | 1575 | release_cq: |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1576 | adapter_delete_cq(dev, qid); |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 1577 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1578 | } |
| 1579 | |
Eric Biggers | f363b08 | 2017-03-30 13:39:16 -0700 | [diff] [blame] | 1580 | static const struct blk_mq_ops nvme_mq_admin_ops = { |
Christoph Hellwig | d29ec82 | 2015-05-22 11:12:46 +0200 | [diff] [blame] | 1581 | .queue_rq = nvme_queue_rq, |
Christoph Hellwig | 77f02a7 | 2017-03-30 13:41:32 +0200 | [diff] [blame] | 1582 | .complete = nvme_pci_complete_rq, |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1583 | .init_hctx = nvme_admin_init_hctx, |
Christoph Hellwig | 0350815 | 2017-06-13 09:15:18 +0200 | [diff] [blame] | 1584 | .init_request = nvme_init_request, |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1585 | .timeout = nvme_timeout, |
| 1586 | }; |
| 1587 | |
Eric Biggers | f363b08 | 2017-03-30 13:39:16 -0700 | [diff] [blame] | 1588 | static const struct blk_mq_ops nvme_mq_ops = { |
Christoph Hellwig | 376f7ef | 2018-12-02 17:46:27 +0100 | [diff] [blame] | 1589 | .queue_rq = nvme_queue_rq, |
| 1590 | .complete = nvme_pci_complete_rq, |
| 1591 | .commit_rqs = nvme_commit_rqs, |
| 1592 | .init_hctx = nvme_init_hctx, |
| 1593 | .init_request = nvme_init_request, |
| 1594 | .map_queues = nvme_pci_map_queues, |
| 1595 | .timeout = nvme_timeout, |
| 1596 | .poll = nvme_poll, |
Jens Axboe | dabcefa | 2018-11-14 09:38:28 -0700 | [diff] [blame] | 1597 | }; |
| 1598 | |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1599 | static void nvme_dev_remove_admin(struct nvme_dev *dev) |
| 1600 | { |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1601 | if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 1602 | /* |
| 1603 | * If the controller was reset during removal, user requests may
| 1604 | * be waiting on a stopped queue. Start the
| 1605 | * queue to flush these to completion. |
| 1606 | */ |
Sagi Grimberg | c81545f | 2017-07-02 15:53:27 +0300 | [diff] [blame] | 1607 | blk_mq_unquiesce_queue(dev->ctrl.admin_q); |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1608 | blk_cleanup_queue(dev->ctrl.admin_q); |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1609 | blk_mq_free_tag_set(&dev->admin_tagset); |
| 1610 | } |
| 1611 | } |
| 1612 | |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1613 | static int nvme_alloc_admin_tags(struct nvme_dev *dev) |
| 1614 | { |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1615 | if (!dev->ctrl.admin_q) { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1616 | dev->admin_tagset.ops = &nvme_mq_admin_ops; |
| 1617 | dev->admin_tagset.nr_hw_queues = 1; |
Keith Busch | e3e9d50 | 2016-01-04 09:10:55 -0700 | [diff] [blame] | 1618 | |
Keith Busch | 38dabe2 | 2017-11-07 15:13:10 -0700 | [diff] [blame] | 1619 | dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1620 | dev->admin_tagset.timeout = ADMIN_TIMEOUT; |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1621 | dev->admin_tagset.numa_node = dev_to_node(dev->dev); |
Christoph Hellwig | d43f1cc | 2019-03-05 05:46:58 -0700 | [diff] [blame] | 1622 | dev->admin_tagset.cmd_size = sizeof(struct nvme_iod); |
Jens Axboe | d348499 | 2017-01-13 14:43:58 -0700 | [diff] [blame] | 1623 | dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1624 | dev->admin_tagset.driver_data = dev; |
| 1625 | |
| 1626 | if (blk_mq_alloc_tag_set(&dev->admin_tagset)) |
| 1627 | return -ENOMEM; |
Sagi Grimberg | 34b6c23 | 2017-07-10 09:22:29 +0300 | [diff] [blame] | 1628 | dev->ctrl.admin_tagset = &dev->admin_tagset; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1629 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1630 | dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); |
| 1631 | if (IS_ERR(dev->ctrl.admin_q)) { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1632 | blk_mq_free_tag_set(&dev->admin_tagset); |
| 1633 | return -ENOMEM; |
| 1634 | } |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1635 | if (!blk_get_queue(dev->ctrl.admin_q)) { |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1636 | nvme_dev_remove_admin(dev); |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 1637 | dev->ctrl.admin_q = NULL; |
Keith Busch | ea191d2 | 2015-01-07 18:55:49 -0700 | [diff] [blame] | 1638 | return -ENODEV; |
| 1639 | } |
Keith Busch | 0fb59cb | 2015-01-07 18:55:50 -0700 | [diff] [blame] | 1640 | } else |
Sagi Grimberg | c81545f | 2017-07-02 15:53:27 +0300 | [diff] [blame] | 1641 | blk_mq_unquiesce_queue(dev->ctrl.admin_q); |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1642 | |
| 1643 | return 0; |
| 1644 | } |
| 1645 | |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 1646 | static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) |
| 1647 | { |
| 1648 | return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); |
| 1649 | } |
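/*
 * Worked example: each queue pair owns one 4-byte SQ tail doorbell and
 * one 4-byte CQ head doorbell (8 bytes per pair at a stride of 1), and
 * the admin queue counts as pair 0. With 4 I/O queues the mapping must
 * therefore cover NVME_REG_DBS + (4 + 1) * 8 = 40 bytes past the
 * doorbell base; larger CAP.DSTRD values scale this by db_stride.
 */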
| 1650 | |
| 1651 | static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) |
| 1652 | { |
| 1653 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 1654 | |
| 1655 | if (size <= dev->bar_mapped_size) |
| 1656 | return 0; |
| 1657 | if (size > pci_resource_len(pdev, 0)) |
| 1658 | return -ENOMEM; |
| 1659 | if (dev->bar) |
| 1660 | iounmap(dev->bar); |
| 1661 | dev->bar = ioremap(pci_resource_start(pdev, 0), size); |
| 1662 | if (!dev->bar) { |
| 1663 | dev->bar_mapped_size = 0; |
| 1664 | return -ENOMEM; |
| 1665 | } |
| 1666 | dev->bar_mapped_size = size; |
| 1667 | dev->dbs = dev->bar + NVME_REG_DBS; |
| 1668 | |
| 1669 | return 0; |
| 1670 | } |
| 1671 | |
Sagi Grimberg | 01ad099 | 2017-05-01 00:27:17 +0300 | [diff] [blame] | 1672 | static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1673 | { |
Matthew Wilcox | ba47e38 | 2013-05-04 06:43:16 -0400 | [diff] [blame] | 1674 | int result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1675 | u32 aqa; |
| 1676 | struct nvme_queue *nvmeq; |
Keith Busch | 1d09062 | 2014-06-23 11:34:01 -0600 | [diff] [blame] | 1677 | |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 1678 | result = nvme_remap_bar(dev, db_bar_size(dev, 0)); |
| 1679 | if (result < 0) |
| 1680 | return result; |
| 1681 | |
Gabriel Krisman Bertazi | 8ef2074 | 2016-10-19 09:51:05 -0600 | [diff] [blame] | 1682 | dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 1683 | NVME_CAP_NSSRC(dev->ctrl.cap) : 0; |
Keith Busch | dfbac8c | 2015-08-10 15:20:40 -0600 | [diff] [blame] | 1684 | |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 1685 | if (dev->subsystem && |
| 1686 | (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) |
| 1687 | writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); |
Keith Busch | dfbac8c | 2015-08-10 15:20:40 -0600 | [diff] [blame] | 1688 | |
Sagi Grimberg | b5b0504 | 2019-07-22 17:06:54 -0700 | [diff] [blame] | 1689 | result = nvme_disable_ctrl(&dev->ctrl); |
Matthew Wilcox | ba47e38 | 2013-05-04 06:43:16 -0400 | [diff] [blame] | 1690 | if (result < 0) |
| 1691 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1692 | |
Keith Busch | a6ff726 | 2018-04-12 09:16:09 -0600 | [diff] [blame] | 1693 | result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1694 | if (result) |
| 1695 | return result; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1696 | |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 1697 | nvmeq = &dev->queues[0]; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1698 | aqa = nvmeq->q_depth - 1; |
| 1699 | aqa |= aqa << 16; |
| 1700 | |
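	/*
	 * NVME_REG_AQA packs the admin SQ size into bits 11:0 (ASQS) and
	 * the admin CQ size into bits 27:16 (ACQS), both zero's based;
	 * the shift above duplicates the same depth into both fields.
	 */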
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 1701 | writel(aqa, dev->bar + NVME_REG_AQA); |
| 1702 | lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); |
| 1703 | lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); |
Keith Busch | 1d09062 | 2014-06-23 11:34:01 -0600 | [diff] [blame] | 1704 | |
Sagi Grimberg | c0f2f45 | 2019-07-22 17:06:53 -0700 | [diff] [blame] | 1705 | result = nvme_enable_ctrl(&dev->ctrl); |
Keith Busch | 025c557 | 2013-05-01 13:07:51 -0600 | [diff] [blame] | 1706 | if (result) |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 1707 | return result; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 1708 | |
Keith Busch | 2b25d98 | 2014-12-22 12:59:04 -0700 | [diff] [blame] | 1709 | nvmeq->cq_vector = 0; |
Keith Busch | 161b8be | 2017-09-14 13:54:39 -0400 | [diff] [blame] | 1710 | nvme_init_queue(nvmeq, 0); |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 1711 | result = queue_request_irq(nvmeq); |
Jon Derrick | 758dd7f | 2015-06-30 11:22:52 -0600 | [diff] [blame] | 1712 | if (result) { |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 1713 | dev->online_queues--; |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 1714 | return result; |
Jon Derrick | 758dd7f | 2015-06-30 11:22:52 -0600 | [diff] [blame] | 1715 | } |
Keith Busch | 025c557 | 2013-05-01 13:07:51 -0600 | [diff] [blame] | 1716 | |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 1717 | set_bit(NVMEQ_ENABLED, &nvmeq->flags); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1718 | return result; |
| 1719 | } |
| 1720 | |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1721 | static int nvme_create_io_queues(struct nvme_dev *dev) |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1722 | { |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1723 | unsigned i, max, rw_queues; |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1724 | int ret = 0; |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1725 | |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1726 | for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { |
Keith Busch | a6ff726 | 2018-04-12 09:16:09 -0600 | [diff] [blame] | 1727 | if (nvme_alloc_queue(dev, i, dev->q_depth)) { |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1728 | ret = -ENOMEM; |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1729 | break; |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1730 | } |
| 1731 | } |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1732 | |
Sagi Grimberg | d858e5f | 2017-04-24 10:58:29 +0300 | [diff] [blame] | 1733 | max = min(dev->max_qid, dev->ctrl.queue_count - 1); |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 1734 | if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { |
| 1735 | rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + |
| 1736 | dev->io_queues[HCTX_TYPE_READ]; |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1737 | } else { |
| 1738 | rw_queues = max; |
| 1739 | } |
| 1740 | |
Keith Busch | 949928c | 2015-12-17 17:08:15 -0700 | [diff] [blame] | 1741 | for (i = dev->online_queues; i <= max; i++) { |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 1742 | bool polled = i > rw_queues; |
| 1743 | |
| 1744 | ret = nvme_create_queue(&dev->queues[i], i, polled); |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 1745 | if (ret) |
Keith Busch | 42f6142 | 2014-03-24 10:46:25 -0600 | [diff] [blame] | 1746 | break; |
Matthew Wilcox | 27e8166 | 2014-04-11 11:58:45 -0400 | [diff] [blame] | 1747 | } |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1748 | |
| 1749 | /* |
 | 1750 | 	 * Ignore failing Create SQ/CQ commands; we can continue with fewer |
Minwoo Im | 8adb8c1 | 2018-01-14 16:14:27 +0900 | [diff] [blame] | 1751 | 	 * than the desired number of queues, and even a controller without |
| 1752 | * I/O queues can still be used to issue admin commands. This might |
Christoph Hellwig | 749941f | 2015-11-26 11:46:39 +0100 | [diff] [blame] | 1753 | 	 * be useful to upgrade a buggy firmware, for example. |
| 1754 | */ |
| 1755 | return ret >= 0 ? 0 : ret; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 1756 | } |
| 1757 | |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 1758 | static ssize_t nvme_cmb_show(struct device *dev, |
| 1759 | struct device_attribute *attr, |
| 1760 | char *buf) |
| 1761 | { |
| 1762 | struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); |
| 1763 | |
Stephen Bates | c965809 | 2016-12-16 11:54:50 -0700 | [diff] [blame] | 1764 | return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 1765 | ndev->cmbloc, ndev->cmbsz); |
| 1766 | } |
| 1767 | static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); |
| 1768 | |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1769 | static u64 nvme_cmb_size_unit(struct nvme_dev *dev) |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1770 | { |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1771 | u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; |
| 1772 | |
| 1773 | return 1ULL << (12 + 4 * szu); |
| 1774 | } |
| 1775 | |
| 1776 | static u32 nvme_cmb_size(struct nvme_dev *dev) |
| 1777 | { |
| 1778 | return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; |
| 1779 | } |
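/*
 * Together these decode CMBSZ: SZU selects the granule (0 -> 4 KiB,
 * 1 -> 64 KiB, and so on in powers of 16) and SZ counts granules,
 * e.g. SZU == 0 with SZ == 1024 describes a 4 MiB CMB.
 */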
| 1780 | |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1781 | static void nvme_map_cmb(struct nvme_dev *dev) |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1782 | { |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1783 | u64 size, offset; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1784 | resource_size_t bar_size; |
| 1785 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Christoph Hellwig | 8969f1f | 2017-10-01 09:37:35 +0200 | [diff] [blame] | 1786 | int bar; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1787 | |
Keith Busch | 9fe5c59 | 2018-10-31 13:15:29 -0600 | [diff] [blame] | 1788 | if (dev->cmb_size) |
| 1789 | return; |
| 1790 | |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 1791 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1792 | if (!dev->cmbsz) |
| 1793 | return; |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 1794 | dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1795 | |
Christoph Hellwig | 88de459 | 2017-12-20 14:50:00 +0100 | [diff] [blame] | 1796 | size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); |
| 1797 | offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); |
Christoph Hellwig | 8969f1f | 2017-10-01 09:37:35 +0200 | [diff] [blame] | 1798 | bar = NVME_CMB_BIR(dev->cmbloc); |
| 1799 | bar_size = pci_resource_len(pdev, bar); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1800 | |
| 1801 | if (offset > bar_size) |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1802 | return; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1803 | |
| 1804 | /* |
| 1805 | * Controllers may support a CMB size larger than their BAR, |
| 1806 | * for example, due to being behind a bridge. Reduce the CMB to |
 | 1807 | 	 * the reported size of the BAR. |
| 1808 | */ |
| 1809 | if (size > bar_size - offset) |
| 1810 | size = bar_size - offset; |
| 1811 | |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1812 | if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { |
| 1813 | dev_warn(dev->ctrl.device, |
| 1814 | "failed to register the CMB\n"); |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1815 | return; |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1816 | } |
| 1817 | |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1818 | dev->cmb_size = size; |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1819 | dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); |
| 1820 | |
| 1821 | if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == |
| 1822 | (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) |
| 1823 | pci_p2pmem_publish(pdev, true); |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 1824 | |
| 1825 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, |
| 1826 | &dev_attr_cmb.attr, NULL)) |
| 1827 | dev_warn(dev->ctrl.device, |
| 1828 | "failed to add sysfs attribute for CMB\n"); |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1829 | } |
| 1830 | |
| 1831 | static inline void nvme_release_cmb(struct nvme_dev *dev) |
| 1832 | { |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1833 | if (dev->cmb_size) { |
Max Gurtovoy | 1c78f77 | 2017-07-30 01:45:08 +0300 | [diff] [blame] | 1834 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, |
| 1835 | &dev_attr_cmb.attr, NULL); |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 1836 | dev->cmb_size = 0; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 1837 | } |
| 1838 | } |
| 1839 | |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1840 | static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 1841 | { |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1842 | u64 dma_addr = dev->host_mem_descs_dma; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1843 | struct nvme_command c; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1844 | int ret; |
| 1845 | |
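	/*
	 * The HMB is configured via Set Features (FID 0x0d): dword11
	 * carries the enable/return flags, dword12 the buffer size in
	 * controller pages, dword13/14 the descriptor list address and
	 * dword15 the descriptor count.
	 */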
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1846 | memset(&c, 0, sizeof(c)); |
| 1847 | c.features.opcode = nvme_admin_set_features; |
| 1848 | c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); |
| 1849 | c.features.dword11 = cpu_to_le32(bits); |
| 1850 | c.features.dword12 = cpu_to_le32(dev->host_mem_size >> |
| 1851 | ilog2(dev->ctrl.page_size)); |
| 1852 | c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); |
| 1853 | c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); |
| 1854 | c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); |
| 1855 | |
| 1856 | ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); |
| 1857 | if (ret) { |
| 1858 | dev_warn(dev->ctrl.device, |
| 1859 | "failed to set host mem (err %d, flags %#x).\n", |
| 1860 | ret, bits); |
| 1861 | } |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1862 | return ret; |
| 1863 | } |
| 1864 | |
| 1865 | static void nvme_free_host_mem(struct nvme_dev *dev) |
| 1866 | { |
| 1867 | int i; |
| 1868 | |
| 1869 | for (i = 0; i < dev->nr_host_mem_descs; i++) { |
| 1870 | struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; |
| 1871 | size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; |
| 1872 | |
Liviu Dudau | cc667f6 | 2018-12-29 17:23:43 +0000 | [diff] [blame] | 1873 | dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], |
| 1874 | le64_to_cpu(desc->addr), |
| 1875 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1876 | } |
| 1877 | |
| 1878 | kfree(dev->host_mem_desc_bufs); |
| 1879 | dev->host_mem_desc_bufs = NULL; |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1880 | dma_free_coherent(dev->dev, |
| 1881 | dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), |
| 1882 | dev->host_mem_descs, dev->host_mem_descs_dma); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1883 | dev->host_mem_descs = NULL; |
Minwoo Im | 7e5dd57 | 2017-11-25 03:03:00 +0900 | [diff] [blame] | 1884 | dev->nr_host_mem_descs = 0; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1885 | } |
| 1886 | |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1887 | static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, |
| 1888 | u32 chunk_size) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1889 | { |
| 1890 | struct nvme_host_mem_buf_desc *descs; |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1891 | u32 max_entries, len; |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1892 | dma_addr_t descs_dma; |
Dan Carpenter | 2ee0e4e | 2017-07-06 12:26:52 +0300 | [diff] [blame] | 1893 | int i = 0; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1894 | void **bufs; |
Minwoo Im | 6fbcde6 | 2017-12-05 05:23:54 +0900 | [diff] [blame] | 1895 | u64 size, tmp; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1896 | |
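	/*
	 * do_div() divides in place (avoiding a plain 64-bit division on
	 * 32-bit platforms); the round-up yields how many chunk_size
	 * descriptors are needed to cover 'preferred'.
	 */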
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1897 | tmp = (preferred + chunk_size - 1); |
| 1898 | do_div(tmp, chunk_size); |
| 1899 | max_entries = tmp; |
Christoph Hellwig | 044a9df | 2017-09-11 12:09:28 -0400 | [diff] [blame] | 1900 | |
| 1901 | if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) |
| 1902 | max_entries = dev->ctrl.hmmaxd; |
| 1903 | |
Luis Chamberlain | 750afb0 | 2019-01-04 09:23:09 +0100 | [diff] [blame] | 1904 | descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), |
| 1905 | &descs_dma, GFP_KERNEL); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1906 | if (!descs) |
| 1907 | goto out; |
| 1908 | |
| 1909 | bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); |
| 1910 | if (!bufs) |
| 1911 | goto out_free_descs; |
| 1912 | |
Minwoo Im | 244a8fe | 2017-11-17 01:34:24 +0900 | [diff] [blame] | 1913 | for (size = 0; size < preferred && i < max_entries; size += len) { |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1914 | dma_addr_t dma_addr; |
| 1915 | |
Christoph Hellwig | 50cdb7c | 2017-07-25 17:39:07 +0200 | [diff] [blame] | 1916 | len = min_t(u64, chunk_size, preferred - size); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1917 | bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, |
| 1918 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
| 1919 | if (!bufs[i]) |
| 1920 | break; |
| 1921 | |
| 1922 | descs[i].addr = cpu_to_le64(dma_addr); |
| 1923 | descs[i].size = cpu_to_le32(len / dev->ctrl.page_size); |
| 1924 | i++; |
| 1925 | } |
| 1926 | |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1927 | if (!size) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1928 | goto out_free_bufs; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1929 | |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1930 | dev->nr_host_mem_descs = i; |
| 1931 | dev->host_mem_size = size; |
| 1932 | dev->host_mem_descs = descs; |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1933 | dev->host_mem_descs_dma = descs_dma; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1934 | dev->host_mem_desc_bufs = bufs; |
| 1935 | return 0; |
| 1936 | |
| 1937 | out_free_bufs: |
| 1938 | while (--i >= 0) { |
| 1939 | size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; |
| 1940 | |
Liviu Dudau | cc667f6 | 2018-12-29 17:23:43 +0000 | [diff] [blame] | 1941 | dma_free_attrs(dev->dev, size, bufs[i], |
| 1942 | le64_to_cpu(descs[i].addr), |
| 1943 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1944 | } |
| 1945 | |
| 1946 | kfree(bufs); |
| 1947 | out_free_descs: |
Christoph Hellwig | 4033f35 | 2017-08-28 10:47:18 +0200 | [diff] [blame] | 1948 | dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, |
| 1949 | descs_dma); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1950 | out: |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1951 | dev->host_mem_descs = NULL; |
| 1952 | return -ENOMEM; |
| 1953 | } |
| 1954 | |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1955 | static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) |
| 1956 | { |
| 1957 | u32 chunk_size; |
| 1958 | |
| 1959 | /* start big and work our way down */ |
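	/*
	 * e.g. a preferred size of 256 MiB with a 4 MiB ceiling (one
	 * MAX_ORDER allocation on a typical x86-64 config) tries 4 MiB
	 * chunks first, then 2 MiB, and so on, until the buffer covers
	 * 'min' or the chunk drops below the HMMINDS floor (or two pages).
	 */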
Akinobu Mita | 30f92d6 | 2017-09-06 12:15:31 +0200 | [diff] [blame] | 1960 | for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); |
Christoph Hellwig | 044a9df | 2017-09-11 12:09:28 -0400 | [diff] [blame] | 1961 | chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 1962 | chunk_size /= 2) { |
| 1963 | if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { |
| 1964 | if (!min || dev->host_mem_size >= min) |
| 1965 | return 0; |
| 1966 | nvme_free_host_mem(dev); |
| 1967 | } |
| 1968 | } |
| 1969 | |
| 1970 | return -ENOMEM; |
| 1971 | } |
| 1972 | |
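/*
 * HMPRE and HMMIN are reported in 4 KiB units, e.g. hmpre == 0x10000
 * means the controller would like a 256 MiB host memory buffer.
 */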
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 1973 | static int nvme_setup_host_mem(struct nvme_dev *dev) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1974 | { |
| 1975 | u64 max = (u64)max_host_mem_size_mb * SZ_1M; |
| 1976 | u64 preferred = (u64)dev->ctrl.hmpre * 4096; |
| 1977 | u64 min = (u64)dev->ctrl.hmmin * 4096; |
| 1978 | u32 enable_bits = NVME_HOST_MEM_ENABLE; |
Minwoo Im | 6fbcde6 | 2017-12-05 05:23:54 +0900 | [diff] [blame] | 1979 | int ret; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1980 | |
| 1981 | preferred = min(preferred, max); |
| 1982 | if (min > max) { |
| 1983 | dev_warn(dev->ctrl.device, |
| 1984 | "min host memory (%lld MiB) above limit (%d MiB).\n", |
| 1985 | min >> ilog2(SZ_1M), max_host_mem_size_mb); |
| 1986 | nvme_free_host_mem(dev); |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 1987 | return 0; |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 1988 | } |
| 1989 | |
| 1990 | /* |
 | 1991 | 	 * If we already have a buffer allocated, check if we can reuse it. |
| 1992 | */ |
| 1993 | if (dev->host_mem_descs) { |
| 1994 | if (dev->host_mem_size >= min) |
| 1995 | enable_bits |= NVME_HOST_MEM_RETURN; |
| 1996 | else |
| 1997 | nvme_free_host_mem(dev); |
| 1998 | } |
| 1999 | |
| 2000 | if (!dev->host_mem_descs) { |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 2001 | if (nvme_alloc_host_mem(dev, min, preferred)) { |
| 2002 | dev_warn(dev->ctrl.device, |
| 2003 | "failed to allocate host memory buffer.\n"); |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2004 | return 0; /* controller must work without HMB */ |
Christoph Hellwig | 92dc689 | 2017-09-11 12:08:43 -0400 | [diff] [blame] | 2005 | } |
| 2006 | |
| 2007 | dev_info(dev->ctrl.device, |
| 2008 | "allocated %lld MiB host memory buffer.\n", |
| 2009 | dev->host_mem_size >> ilog2(SZ_1M)); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2010 | } |
| 2011 | |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2012 | ret = nvme_set_host_mem(dev, enable_bits); |
| 2013 | if (ret) |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2014 | nvme_free_host_mem(dev); |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2015 | return ret; |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 2016 | } |
| 2017 | |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2018 | /* |
 | 2019 |  * nrirqs is the number of interrupts available for write and read |
| 2020 | * queues. The core already reserved an interrupt for the admin queue. |
| 2021 | */ |
| 2022 | static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2023 | { |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2024 | struct nvme_dev *dev = affd->priv; |
| 2025 | unsigned int nr_read_queues; |
Ming Lei | c45b1fa | 2019-01-03 09:34:39 +0800 | [diff] [blame] | 2026 | |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2027 | /* |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2028 | 	 * If there is no interrupt available for queues, ensure that |
| 2029 | * the default queue is set to 1. The affinity set size is |
| 2030 | * also set to one, but the irq core ignores it for this case. |
| 2031 | * |
 | 2032 | 	 * If only one interrupt is available or 'write_queues' == 0, combine |
| 2033 | * write and read queues. |
| 2034 | * |
| 2035 | * If 'write_queues' > 0, ensure it leaves room for at least one read |
| 2036 | * queue. |
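	 *
	 * For example, nrirqs == 8 with write_queues == 6 yields
	 * nr_read_queues == 2 and a default set of 6.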
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2037 | */ |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2038 | if (!nrirqs) { |
| 2039 | nrirqs = 1; |
| 2040 | nr_read_queues = 0; |
| 2041 | } else if (nrirqs == 1 || !write_queues) { |
| 2042 | nr_read_queues = 0; |
| 2043 | } else if (write_queues >= nrirqs) { |
| 2044 | nr_read_queues = 1; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2045 | } else { |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2046 | nr_read_queues = nrirqs - write_queues; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2047 | } |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2048 | |
| 2049 | dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; |
| 2050 | affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; |
| 2051 | dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; |
| 2052 | affd->set_size[HCTX_TYPE_READ] = nr_read_queues; |
| 2053 | affd->nr_sets = nr_read_queues ? 2 : 1; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2054 | } |
| 2055 | |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2056 | static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2057 | { |
| 2058 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2059 | struct irq_affinity affd = { |
Ming Lei | 9cfef55 | 2019-02-16 18:13:08 +0100 | [diff] [blame] | 2060 | .pre_vectors = 1, |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2061 | .calc_sets = nvme_calc_irq_sets, |
| 2062 | .priv = dev, |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2063 | }; |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2064 | unsigned int irq_queues, this_p_queues; |
| 2065 | |
| 2066 | /* |
| 2067 | * Poll queues don't need interrupts, but we need at least one IO |
| 2068 | * queue left over for non-polled IO. |
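	 *
	 * e.g. nr_io_queues == 8 with poll_queues == 2 leaves two poll
	 * queues and irq_queues == 7: six interrupt-driven I/O queues
	 * plus the admin vector.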
| 2069 | */ |
| 2070 | this_p_queues = poll_queues; |
| 2071 | if (this_p_queues >= nr_io_queues) { |
| 2072 | this_p_queues = nr_io_queues - 1; |
| 2073 | irq_queues = 1; |
| 2074 | } else { |
Keith Busch | 7e4c6b9 | 2019-12-06 08:11:17 +0900 | [diff] [blame] | 2075 | irq_queues = nr_io_queues - this_p_queues + 1; |
Jens Axboe | 6451fe7 | 2018-12-09 11:21:45 -0700 | [diff] [blame] | 2076 | } |
| 2077 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2078 | |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2079 | /* Initialize for the single interrupt case */ |
| 2080 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; |
| 2081 | dev->io_queues[HCTX_TYPE_READ] = 0; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2082 | |
Benjamin Herrenschmidt | 6634133 | 2019-08-07 17:51:21 +1000 | [diff] [blame] | 2083 | /* |
| 2084 | * Some Apple controllers require all queues to use the |
| 2085 | * first vector. |
| 2086 | */ |
| 2087 | if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR) |
| 2088 | irq_queues = 1; |
| 2089 | |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 2090 | return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, |
| 2091 | PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2092 | } |
| 2093 | |
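/*
 * A CQ may only be deleted once no SQ references it, so tear down all
 * SQs first and only then the CQs.
 */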
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2094 | static void nvme_disable_io_queues(struct nvme_dev *dev) |
| 2095 | { |
| 2096 | if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) |
| 2097 | __nvme_disable_io_queues(dev, nvme_admin_delete_cq); |
| 2098 | } |
| 2099 | |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 2100 | static int nvme_setup_io_queues(struct nvme_dev *dev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2101 | { |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 2102 | struct nvme_queue *adminq = &dev->queues[0]; |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2103 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 2104 | int result, nr_io_queues; |
| 2105 | unsigned long size; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2106 | |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2107 | nr_io_queues = max_io_queues(); |
Benjamin Herrenschmidt | d38e9f0 | 2019-08-07 17:51:22 +1000 | [diff] [blame] | 2108 | |
| 2109 | /* |
 | 2110 | 	 * If tags are shared with the admin queue (Apple bug), then |
| 2111 | * make sure we only use one IO queue. |
| 2112 | */ |
| 2113 | if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) |
| 2114 | nr_io_queues = 1; |
| 2115 | |
Christoph Hellwig | 9a0be7a | 2015-11-26 11:09:06 +0100 | [diff] [blame] | 2116 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); |
| 2117 | if (result < 0) |
Matthew Wilcox | 1b23484 | 2011-01-20 13:01:49 -0500 | [diff] [blame] | 2118 | return result; |
Christoph Hellwig | 9a0be7a | 2015-11-26 11:09:06 +0100 | [diff] [blame] | 2119 | |
Christoph Hellwig | f5fa90d | 2016-06-06 23:20:50 +0200 | [diff] [blame] | 2120 | if (nr_io_queues == 0) |
Keith Busch | a522905 | 2016-04-08 16:09:10 -0600 | [diff] [blame] | 2121 | return 0; |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 2122 | |
| 2123 | clear_bit(NVMEQ_ENABLED, &adminq->flags); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2124 | |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 2125 | if (dev->cmb_use_sqes) { |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 2126 | result = nvme_cmb_qdepth(dev, nr_io_queues, |
| 2127 | sizeof(struct nvme_command)); |
| 2128 | if (result > 0) |
| 2129 | dev->q_depth = result; |
| 2130 | else |
Logan Gunthorpe | 0f238ff | 2018-10-04 15:27:43 -0600 | [diff] [blame] | 2131 | dev->cmb_use_sqes = false; |
Jon Derrick | 8ffaadf | 2015-07-20 10:14:09 -0600 | [diff] [blame] | 2132 | } |
| 2133 | |
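	/*
	 * Each queue needs 8 * db_stride bytes of doorbell space; if the
	 * BAR remap fails, shed queues one at a time until they fit.
	 */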
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 2134 | do { |
| 2135 | size = db_bar_size(dev, nr_io_queues); |
| 2136 | result = nvme_remap_bar(dev, size); |
| 2137 | if (!result) |
| 2138 | break; |
| 2139 | if (!--nr_io_queues) |
| 2140 | return -ENOMEM; |
| 2141 | } while (1); |
| 2142 | adminq->q_db = dev->dbs; |
Matthew Wilcox | f1938f6 | 2011-10-20 17:00:41 -0400 | [diff] [blame] | 2143 | |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2144 | retry: |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 2145 | /* Deregister the admin queue's interrupt */ |
Christoph Hellwig | 0ff199c | 2017-04-13 09:06:43 +0200 | [diff] [blame] | 2146 | pci_free_irq(pdev, 0, adminq); |
Keith Busch | 9d713c2 | 2013-07-15 15:02:24 -0600 | [diff] [blame] | 2147 | |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2148 | /* |
 | 2149 | 	 * If we enabled MSI-X early because INTx is unavailable, disable it |
 | 2150 | 	 * again before setting up the full range we need. |
| 2151 | */ |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2152 | pci_free_irq_vectors(pdev); |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2153 | |
| 2154 | result = nvme_setup_irqs(dev, nr_io_queues); |
Keith Busch | 22b5560 | 2018-04-12 09:16:10 -0600 | [diff] [blame] | 2155 | if (result <= 0) |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2156 | return -EIO; |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2157 | |
Keith Busch | 22b5560 | 2018-04-12 09:16:10 -0600 | [diff] [blame] | 2158 | dev->num_vecs = result; |
Jens Axboe | 4b04cc6 | 2018-11-05 12:44:33 -0700 | [diff] [blame] | 2159 | result = max(result - 1, 1); |
Christoph Hellwig | e20ba6e | 2018-12-02 17:46:16 +0100 | [diff] [blame] | 2160 | dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; |
Matthew Wilcox | 1b23484 | 2011-01-20 13:01:49 -0500 | [diff] [blame] | 2161 | |
Matthew Wilcox | 063a809 | 2013-06-20 10:53:48 -0400 | [diff] [blame] | 2162 | /* |
| 2163 | * Should investigate if there's a performance win from allocating |
| 2164 | * more queues than interrupt vectors; it might allow the submission |
| 2165 | * path to scale better, even if the receive path is limited by the |
| 2166 | * number of interrupts. |
| 2167 | */ |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2168 | result = queue_request_irq(adminq); |
Keith Busch | 7c349dd | 2019-03-08 10:43:06 -0700 | [diff] [blame] | 2169 | if (result) |
Keith Busch | d487562 | 2016-11-15 15:56:26 -0500 | [diff] [blame] | 2170 | return result; |
Christoph Hellwig | 4e22410 | 2018-12-02 17:46:17 +0100 | [diff] [blame] | 2171 | set_bit(NVMEQ_ENABLED, &adminq->flags); |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2172 | |
| 2173 | result = nvme_create_io_queues(dev); |
| 2174 | if (result || dev->online_queues < 2) |
| 2175 | return result; |
| 2176 | |
| 2177 | if (dev->online_queues - 1 < dev->max_qid) { |
| 2178 | nr_io_queues = dev->online_queues - 1; |
| 2179 | nvme_disable_io_queues(dev); |
| 2180 | nvme_suspend_io_queues(dev); |
| 2181 | goto retry; |
| 2182 | } |
| 2183 | dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", |
| 2184 | dev->io_queues[HCTX_TYPE_DEFAULT], |
| 2185 | dev->io_queues[HCTX_TYPE_READ], |
| 2186 | dev->io_queues[HCTX_TYPE_POLL]); |
| 2187 | return 0; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2188 | } |
| 2189 | |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 2190 | static void nvme_del_queue_end(struct request *req, blk_status_t error) |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2191 | { |
| 2192 | struct nvme_queue *nvmeq = req->end_io_data; |
| 2193 | |
| 2194 | blk_mq_free_request(req); |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2195 | complete(&nvmeq->delete_done); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2196 | } |
| 2197 | |
Christoph Hellwig | 2a842ac | 2017-06-03 09:38:04 +0200 | [diff] [blame] | 2198 | static void nvme_del_cq_end(struct request *req, blk_status_t error) |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2199 | { |
| 2200 | struct nvme_queue *nvmeq = req->end_io_data; |
| 2201 | |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2202 | if (error) |
| 2203 | set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2204 | |
| 2205 | nvme_del_queue_end(req, error); |
| 2206 | } |
| 2207 | |
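/*
 * Issue an asynchronous Delete I/O SQ (opcode 0x0) or Delete I/O CQ
 * (opcode 0x4) admin command; completion is signalled through the
 * end_io callbacks above.
 */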
| 2208 | static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) |
| 2209 | { |
| 2210 | struct request_queue *q = nvmeq->dev->ctrl.admin_q; |
| 2211 | struct request *req; |
| 2212 | struct nvme_command cmd; |
| 2213 | |
| 2214 | memset(&cmd, 0, sizeof(cmd)); |
| 2215 | cmd.delete_queue.opcode = opcode; |
| 2216 | cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); |
| 2217 | |
Christoph Hellwig | eb71f43 | 2016-06-13 16:45:23 +0200 | [diff] [blame] | 2218 | req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2219 | if (IS_ERR(req)) |
| 2220 | return PTR_ERR(req); |
| 2221 | |
| 2222 | req->timeout = ADMIN_TIMEOUT; |
| 2223 | req->end_io_data = nvmeq; |
| 2224 | |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2225 | init_completion(&nvmeq->delete_done); |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2226 | blk_execute_rq_nowait(q, NULL, req, false, |
| 2227 | opcode == nvme_admin_delete_cq ? |
| 2228 | nvme_del_cq_end : nvme_del_queue_end); |
| 2229 | return 0; |
| 2230 | } |
| 2231 | |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2232 | static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2233 | { |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2234 | int nr_queues = dev->online_queues - 1, sent = 0; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2235 | unsigned long timeout; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2236 | |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2237 | retry: |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2238 | timeout = ADMIN_TIMEOUT; |
| 2239 | while (nr_queues > 0) { |
| 2240 | if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) |
| 2241 | break; |
| 2242 | nr_queues--; |
| 2243 | sent++; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2244 | } |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2245 | while (sent) { |
| 2246 | struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; |
| 2247 | |
| 2248 | timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2249 | timeout); |
| 2250 | if (timeout == 0) |
| 2251 | return false; |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2252 | |
Christoph Hellwig | d1ed6aa | 2018-12-02 17:46:22 +0100 | [diff] [blame] | 2253 | sent--; |
Christoph Hellwig | 5271edd | 2018-12-02 17:46:21 +0100 | [diff] [blame] | 2254 | if (nr_queues) |
| 2255 | goto retry; |
| 2256 | } |
| 2257 | return true; |
Keith Busch | db3cbff | 2016-01-12 14:41:17 -0700 | [diff] [blame] | 2258 | } |
| 2259 | |
Keith Busch | 5d02a5c | 2019-09-03 09:22:24 -0600 | [diff] [blame] | 2260 | static void nvme_dev_add(struct nvme_dev *dev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2261 | { |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2262 | int ret; |
| 2263 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2264 | if (!dev->ctrl.tagset) { |
Christoph Hellwig | 376f7ef | 2018-12-02 17:46:27 +0100 | [diff] [blame] | 2265 | dev->tagset.ops = &nvme_mq_ops; |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2266 | dev->tagset.nr_hw_queues = dev->online_queues - 1; |
yangerkun | 8fe34be | 2019-07-23 11:23:13 +0800 | [diff] [blame] | 2267 | dev->tagset.nr_maps = 2; /* default + read */ |
Christoph Hellwig | ed92ad3 | 2018-12-14 14:06:59 +0100 | [diff] [blame] | 2268 | if (dev->io_queues[HCTX_TYPE_POLL]) |
| 2269 | dev->tagset.nr_maps++; |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2270 | dev->tagset.timeout = NVME_IO_TIMEOUT; |
| 2271 | dev->tagset.numa_node = dev_to_node(dev->dev); |
| 2272 | dev->tagset.queue_depth = |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2273 | min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; |
Christoph Hellwig | d43f1cc | 2019-03-05 05:46:58 -0700 | [diff] [blame] | 2274 | dev->tagset.cmd_size = sizeof(struct nvme_iod); |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2275 | dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; |
| 2276 | dev->tagset.driver_data = dev; |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2277 | |
Benjamin Herrenschmidt | d38e9f0 | 2019-08-07 17:51:22 +1000 | [diff] [blame] | 2278 | /* |
 | 2279 | 		 * Some Apple controllers require tags to be unique |
 | 2280 | 		 * across the admin and IO queues, so reserve the first 32 |
| 2281 | * tags of the IO queue. |
| 2282 | */ |
| 2283 | if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) |
| 2284 | dev->tagset.reserved_tags = NVME_AQ_DEPTH; |
| 2285 | |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2286 | ret = blk_mq_alloc_tag_set(&dev->tagset); |
| 2287 | if (ret) { |
| 2288 | dev_warn(dev->ctrl.device, |
| 2289 | "IO queues tagset allocation failed %d\n", ret); |
Keith Busch | 5d02a5c | 2019-09-03 09:22:24 -0600 | [diff] [blame] | 2290 | return; |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2291 | } |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2292 | dev->ctrl.tagset = &dev->tagset; |
Keith Busch | 949928c | 2015-12-17 17:08:15 -0700 | [diff] [blame] | 2293 | } else { |
| 2294 | blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); |
| 2295 | |
| 2296 | /* Free previously allocated queues that are no longer usable */ |
| 2297 | nvme_free_queues(dev, dev->online_queues); |
Keith Busch | ffe7704 | 2015-06-08 10:08:15 -0600 | [diff] [blame] | 2298 | } |
Keith Busch | 949928c | 2015-12-17 17:08:15 -0700 | [diff] [blame] | 2299 | |
Maxim Levitsky | e8fd41b | 2019-05-02 14:31:33 +0300 | [diff] [blame] | 2300 | nvme_dbbuf_set(dev); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2301 | } |
| 2302 | |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2303 | static int nvme_pci_enable(struct nvme_dev *dev) |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2304 | { |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2305 | int result = -ENOMEM; |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2306 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2307 | |
| 2308 | if (pci_enable_device_mem(pdev)) |
| 2309 | return result; |
| 2310 | |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2311 | pci_set_master(pdev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2312 | |
Christoph Hellwig | 4fe0692 | 2019-06-28 09:17:48 +0200 | [diff] [blame] | 2313 | if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64))) |
Russell King | 052d0ef | 2013-06-26 23:49:11 +0100 | [diff] [blame] | 2314 | goto disable; |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2315 | |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 2316 | if (readl(dev->bar + NVME_REG_CSTS) == -1) { |
Keith Busch | 0e53d18 | 2013-12-10 13:10:39 -0700 | [diff] [blame] | 2317 | result = -ENODEV; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2318 | goto disable; |
Keith Busch | 0e53d18 | 2013-12-10 13:10:39 -0700 | [diff] [blame] | 2319 | } |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2320 | |
| 2321 | /* |
Keith Busch | a522905 | 2016-04-08 16:09:10 -0600 | [diff] [blame] | 2322 | * Some devices and/or platforms don't advertise or work with INTx |
| 2323 | * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll |
| 2324 | * adjust this later. |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2325 | */ |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2326 | result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); |
| 2327 | if (result < 0) |
| 2328 | return result; |
Jens Axboe | e32efbf | 2014-11-14 09:49:26 -0700 | [diff] [blame] | 2329 | |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2330 | dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 2331 | |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2332 | dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, |
weiping zhang | b27c1e6 | 2017-07-10 16:46:59 +0800 | [diff] [blame] | 2333 | io_queue_depth); |
Sagi Grimberg | aa22c8e | 2019-08-22 10:51:17 -0700 | [diff] [blame] | 2334 | dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2335 | dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); |
Christoph Hellwig | 7a67cbe | 2015-11-20 08:58:10 +0100 | [diff] [blame] | 2336 | dev->dbs = dev->bar + 4096; |
Stephan Günther | 1f390c1 | 2015-12-01 13:23:22 -0700 | [diff] [blame] | 2337 | |
| 2338 | /* |
Benjamin Herrenschmidt | 6634133 | 2019-08-07 17:51:21 +1000 | [diff] [blame] | 2339 | * Some Apple controllers require a non-standard SQE size. |
| 2340 | * Interestingly they also seem to ignore the CC:IOSQES register |
| 2341 | * so we don't bother updating it here. |
| 2342 | */ |
| 2343 | if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) |
| 2344 | dev->io_sqes = 7; |
| 2345 | else |
| 2346 | dev->io_sqes = NVME_NVM_IOSQES; |
Stephan Günther | 1f390c1 | 2015-12-01 13:23:22 -0700 | [diff] [blame] | 2347 | |
| 2348 | /* |
| 2349 | * Temporary fix for the Apple controller found in the MacBook8,1 and |
| 2350 | * some MacBook7,1 to avoid controller resets and data loss. |
| 2351 | */ |
| 2352 | if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { |
| 2353 | dev->q_depth = 2; |
Christoph Hellwig | 9bdcfb1 | 2017-05-20 15:14:43 +0200 | [diff] [blame] | 2354 | dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " |
| 2355 | "set queue depth=%u to work around controller resets\n", |
Stephan Günther | 1f390c1 | 2015-12-01 13:23:22 -0700 | [diff] [blame] | 2356 | dev->q_depth); |
Martin K. Petersen | d554b5e | 2017-06-27 22:27:57 -0400 | [diff] [blame] | 2357 | } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && |
| 2358 | (pdev->device == 0xa821 || pdev->device == 0xa822) && |
Sagi Grimberg | 20d0dfe | 2017-06-27 22:16:38 +0300 | [diff] [blame] | 2359 | NVME_CAP_MQES(dev->ctrl.cap) == 0) { |
Martin K. Petersen | d554b5e | 2017-06-27 22:27:57 -0400 | [diff] [blame] | 2360 | dev->q_depth = 64; |
| 2361 | dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " |
| 2362 | "set queue depth=%u\n", dev->q_depth); |
Stephan Günther | 1f390c1 | 2015-12-01 13:23:22 -0700 | [diff] [blame] | 2363 | } |
| 2364 | |
Benjamin Herrenschmidt | d38e9f0 | 2019-08-07 17:51:22 +1000 | [diff] [blame] | 2365 | /* |
| 2366 | * Controllers with the shared tags quirk need the IO queue to be |
| 2367 | * big enough so that we get 32 tags for the admin queue |
| 2368 | */ |
| 2369 | if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && |
| 2370 | (dev->q_depth < (NVME_AQ_DEPTH + 2))) { |
| 2371 | dev->q_depth = NVME_AQ_DEPTH + 2; |
| 2372 | dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", |
| 2373 | dev->q_depth); |
| 2374 | } |
| 2375 | |
Christoph Hellwig | f65efd6 | 2017-12-20 14:25:11 +0100 | [diff] [blame] | 2377 | nvme_map_cmb(dev); |
Stephen Bates | 202021c | 2016-10-05 20:01:12 -0600 | [diff] [blame] | 2378 | |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2379 | pci_enable_pcie_error_reporting(pdev); |
| 2380 | pci_save_state(pdev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2381 | return 0; |
| 2382 | |
| 2383 | disable: |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2384 | pci_disable_device(pdev); |
| 2385 | return result; |
| 2386 | } |
| 2387 | |
| 2388 | static void nvme_dev_unmap(struct nvme_dev *dev) |
| 2389 | { |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2390 | if (dev->bar) |
| 2391 | iounmap(dev->bar); |
Johannes Thumshirn | a1f447b | 2016-06-07 09:44:02 +0200 | [diff] [blame] | 2392 | pci_release_mem_regions(to_pci_dev(dev->dev)); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2393 | } |
| 2394 | |
| 2395 | static void nvme_pci_disable(struct nvme_dev *dev) |
| 2396 | { |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2397 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 2398 | |
Christoph Hellwig | dca51e7 | 2016-09-14 16:18:57 +0200 | [diff] [blame] | 2399 | pci_free_irq_vectors(pdev); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2400 | |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 2401 | if (pci_is_enabled(pdev)) { |
| 2402 | pci_disable_pcie_error_reporting(pdev); |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2403 | pci_disable_device(pdev); |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 2404 | } |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 2405 | } |
| 2406 | |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2407 | static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2408 | { |
Keith Busch | e43269e | 2019-05-14 14:07:38 -0600 | [diff] [blame] | 2409 | bool dead = true, freeze = false; |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2410 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Keith Busch | 2240427 | 2013-07-15 15:02:20 -0600 | [diff] [blame] | 2411 | |
Keith Busch | 77bf25e | 2015-11-26 12:21:29 +0100 | [diff] [blame] | 2412 | mutex_lock(&dev->shutdown_lock); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2413 | if (pci_is_enabled(pdev)) { |
| 2414 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
| 2415 | |
Keith Busch | ebef736 | 2017-06-27 17:44:05 -0600 | [diff] [blame] | 2416 | if (dev->ctrl.state == NVME_CTRL_LIVE || |
Keith Busch | e43269e | 2019-05-14 14:07:38 -0600 | [diff] [blame] | 2417 | dev->ctrl.state == NVME_CTRL_RESETTING) { |
| 2418 | freeze = true; |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2419 | nvme_start_freeze(&dev->ctrl); |
Keith Busch | e43269e | 2019-05-14 14:07:38 -0600 | [diff] [blame] | 2420 | } |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2421 | dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || |
| 2422 | pdev->error_state != pci_channel_io_normal); |
Keith Busch | c9d3bf8 | 2015-01-07 18:55:52 -0700 | [diff] [blame] | 2423 | } |
Gabriel Krisman Bertazi | c21377f | 2016-08-11 09:35:57 -0600 | [diff] [blame] | 2424 | |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2425 | /* |
| 2426 | * Give the controller a chance to complete all entered requests if |
| 2427 | * doing a safe shutdown. |
| 2428 | */ |
Keith Busch | e43269e | 2019-05-14 14:07:38 -0600 | [diff] [blame] | 2429 | if (!dead && shutdown && freeze) |
| 2430 | nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2431 | |
Jianchao Wang | 9a915a5 | 2018-02-12 20:57:24 +0800 | [diff] [blame] | 2432 | nvme_stop_queues(&dev->ctrl); |
| 2433 | |
Keith Busch | 64ee0ac | 2018-04-12 09:16:08 -0600 | [diff] [blame] | 2434 | if (!dead && dev->ctrl.queue_count > 0) { |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2435 | nvme_disable_io_queues(dev); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2436 | nvme_disable_admin_queue(dev, shutdown); |
Keith Busch | 4d11542 | 2013-12-10 13:10:40 -0700 | [diff] [blame] | 2437 | } |
Keith Busch | 8fae268 | 2019-01-04 15:04:33 -0700 | [diff] [blame] | 2438 | nvme_suspend_io_queues(dev); |
| 2439 | nvme_suspend_queue(&dev->queues[0]); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2440 | nvme_pci_disable(dev); |
Keith Busch | fa46c6f | 2020-02-13 01:41:05 +0900 | [diff] [blame] | 2441 | nvme_reap_pending_cqes(dev); |
Keith Busch | 07836e6 | 2015-02-19 10:34:48 -0700 | [diff] [blame] | 2442 | |
Ming Lin | e1958e6 | 2016-05-18 14:05:01 -0700 | [diff] [blame] | 2443 | blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); |
| 2444 | blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); |
Ming Lei | 622b8b6 | 2019-07-24 11:48:42 +0800 | [diff] [blame] | 2445 | blk_mq_tagset_wait_completed_request(&dev->tagset); |
| 2446 | blk_mq_tagset_wait_completed_request(&dev->admin_tagset); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2447 | |
| 2448 | /* |
 | 2449 | 	 * The driver will not be starting up queues again if shutting down, so |
 | 2450 | 	 * it must flush all entered requests to their failed completion to |
 | 2451 | 	 * avoid deadlocking the blk-mq hot-cpu notifier. |
| 2452 | */ |
Keith Busch | c8e9e9b | 2019-04-30 09:33:41 -0600 | [diff] [blame] | 2453 | if (shutdown) { |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2454 | nvme_start_queues(&dev->ctrl); |
Keith Busch | c8e9e9b | 2019-04-30 09:33:41 -0600 | [diff] [blame] | 2455 | if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) |
| 2456 | blk_mq_unquiesce_queue(dev->ctrl.admin_q); |
| 2457 | } |
Keith Busch | 77bf25e | 2015-11-26 12:21:29 +0100 | [diff] [blame] | 2458 | mutex_unlock(&dev->shutdown_lock); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2459 | } |
| 2460 | |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2461 | static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) |
| 2462 | { |
| 2463 | if (!nvme_wait_reset(&dev->ctrl)) |
| 2464 | return -EBUSY; |
| 2465 | nvme_dev_disable(dev, shutdown); |
| 2466 | return 0; |
| 2467 | } |
| 2468 | |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2469 | static int nvme_setup_prp_pools(struct nvme_dev *dev) |
| 2470 | { |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2471 | dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2472 | PAGE_SIZE, PAGE_SIZE, 0); |
| 2473 | if (!dev->prp_page_pool) |
| 2474 | return -ENOMEM; |
| 2475 | |
Matthew Wilcox | 99802a7 | 2011-02-10 10:30:34 -0500 | [diff] [blame] | 2476 | /* Optimisation for I/Os between 4k and 128k */ |
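	/*
	 * A 256 byte element holds 32 8-byte PRP entries, i.e. a PRP list
	 * covering up to 32 * 4 KiB = 128 KiB of transfer.
	 */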
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2477 | dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, |
Matthew Wilcox | 99802a7 | 2011-02-10 10:30:34 -0500 | [diff] [blame] | 2478 | 256, 256, 0); |
| 2479 | if (!dev->prp_small_pool) { |
| 2480 | dma_pool_destroy(dev->prp_page_pool); |
| 2481 | return -ENOMEM; |
| 2482 | } |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2483 | return 0; |
| 2484 | } |
| 2485 | |
| 2486 | static void nvme_release_prp_pools(struct nvme_dev *dev) |
| 2487 | { |
| 2488 | dma_pool_destroy(dev->prp_page_pool); |
Matthew Wilcox | 99802a7 | 2011-02-10 10:30:34 -0500 | [diff] [blame] | 2489 | dma_pool_destroy(dev->prp_small_pool); |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2490 | } |
| 2491 | |
Keith Busch | 770597e | 2019-09-05 07:52:33 -0600 | [diff] [blame] | 2492 | static void nvme_free_tagset(struct nvme_dev *dev) |
| 2493 | { |
| 2494 | if (dev->tagset.tags) |
| 2495 | blk_mq_free_tag_set(&dev->tagset); |
| 2496 | dev->ctrl.tagset = NULL; |
| 2497 | } |
| 2498 | |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2499 | static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2500 | { |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2501 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
Keith Busch | 9ac2709 | 2014-01-31 16:53:39 -0700 | [diff] [blame] | 2502 | |
Helen Koike | f9f38e3 | 2017-04-10 12:51:07 -0300 | [diff] [blame] | 2503 | nvme_dbbuf_dma_free(dev); |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2504 | put_device(dev->dev); |
Keith Busch | 770597e | 2019-09-05 07:52:33 -0600 | [diff] [blame] | 2505 | nvme_free_tagset(dev); |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2506 | if (dev->ctrl.admin_q) |
| 2507 | blk_put_queue(dev->ctrl.admin_q); |
Keith Busch | 5e82e95 | 2013-02-19 10:17:58 -0700 | [diff] [blame] | 2508 | kfree(dev->queues); |
Scott Bauer | e286bcf | 2017-02-22 10:15:07 -0700 | [diff] [blame] | 2509 | free_opal_dev(dev->ctrl.opal_dev); |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2510 | mempool_destroy(dev->iod_mempool); |
Keith Busch | 5e82e95 | 2013-02-19 10:17:58 -0700 | [diff] [blame] | 2511 | kfree(dev); |
| 2512 | } |
| 2513 | |
Chaitanya Kulkarni | 7c1ce40 | 2019-06-08 13:16:32 -0700 | [diff] [blame] | 2514 | static void nvme_remove_dead_ctrl(struct nvme_dev *dev) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2515 | { |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2516 | /* |
| 2517 | * Set state to deleting now to avoid blocking nvme_wait_reset(), which |
| 2518 | * may be holding this pci_dev's device lock. |
| 2519 | */ |
| 2520 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); |
Christoph Hellwig | d22524a | 2017-10-18 13:25:42 +0200 | [diff] [blame] | 2521 | nvme_get_ctrl(&dev->ctrl); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2522 | nvme_dev_disable(dev, false); |
Jianchao Wang | 9f9cafc | 2018-06-20 13:42:22 +0800 | [diff] [blame] | 2523 | nvme_kill_queues(&dev->ctrl); |
Ming Lei | 03e0f3a | 2017-11-09 19:32:07 +0800 | [diff] [blame] | 2524 | if (!queue_work(nvme_wq, &dev->remove_work)) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2525 | nvme_put_ctrl(&dev->ctrl); |
| 2526 | } |
| 2527 | |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 2528 | static void nvme_reset_work(struct work_struct *work) |
Keith Busch | 5e82e95 | 2013-02-19 10:17:58 -0700 | [diff] [blame] | 2529 | { |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2530 | struct nvme_dev *dev = |
| 2531 | container_of(work, struct nvme_dev, ctrl.reset_work); |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 2532 | bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); |
Chaitanya Kulkarni | e71afda | 2019-06-08 13:01:02 -0700 | [diff] [blame] | 2533 | int result; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2534 | |
Chaitanya Kulkarni | e71afda | 2019-06-08 13:01:02 -0700 | [diff] [blame] | 2535 | if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) { |
| 2536 | result = -ENODEV; |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 2537 | goto out; |
Chaitanya Kulkarni | e71afda | 2019-06-08 13:01:02 -0700 | [diff] [blame] | 2538 | } |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 2539 | |
| 2540 | /* |
| 2541 | * If we're called to reset a live controller first shut it down before |
| 2542 | * moving on. |
| 2543 | */ |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2544 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2545 | nvme_dev_disable(dev, false); |
Keith Busch | d6135c3a | 2019-05-14 14:46:09 -0600 | [diff] [blame] | 2546 | nvme_sync_queues(&dev->ctrl); |
Christoph Hellwig | fd634f41 | 2015-11-26 12:42:26 +0100 | [diff] [blame] | 2547 | |
Keith Busch | 5c959d7 | 2019-01-23 18:46:11 -0700 | [diff] [blame] | 2548 | mutex_lock(&dev->shutdown_lock); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2549 | result = nvme_pci_enable(dev); |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2550 | if (result) |
Keith Busch | 4726bcf | 2019-02-11 09:23:50 -0700 | [diff] [blame] | 2551 | goto out_unlock; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2552 | |
Sagi Grimberg | 01ad099 | 2017-05-01 00:27:17 +0300 | [diff] [blame] | 2553 | result = nvme_pci_configure_admin_queue(dev); |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2554 | if (result) |
Keith Busch | 4726bcf | 2019-02-11 09:23:50 -0700 | [diff] [blame] | 2555 | goto out_unlock; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2556 | |
Keith Busch | 0fb59cb | 2015-01-07 18:55:50 -0700 | [diff] [blame] | 2557 | result = nvme_alloc_admin_tags(dev); |
| 2558 | if (result) |
Keith Busch | 4726bcf | 2019-02-11 09:23:50 -0700 | [diff] [blame] | 2559 | goto out_unlock; |
Dan McLeran | b9afca3 | 2014-04-07 17:10:11 -0600 | [diff] [blame] | 2560 | |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2561 | /* |
 | 2562 | * Limit the max command size to prevent iod->sg allocations from going |
| 2563 | * over a single page. |
| 2564 | */ |
Christoph Hellwig | 7637de3 | 2019-07-03 09:54:44 -0700 | [diff] [blame] | 2565 | dev->ctrl.max_hw_sectors = min_t(u32, |
| 2566 | NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9); |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2567 | dev->ctrl.max_segments = NVME_MAX_SEGS; |
Christoph Hellwig | a48bc52 | 2019-06-05 21:08:24 +0200 | [diff] [blame] | 2568 | |
| 2569 | /* |
| 2570 | * Don't limit the IOMMU merged segment size. |
| 2571 | */ |
| 2572 | dma_set_max_seg_size(dev->dev, 0xffffffff); |
| 2573 | |
Keith Busch | 5c959d7 | 2019-01-23 18:46:11 -0700 | [diff] [blame] | 2574 | mutex_unlock(&dev->shutdown_lock); |
| 2575 | |
| 2576 | /* |
 | 2577 | * Mark the controller CONNECTING, a state introduced by the nvme-fc/rdma |
 | 2578 | * transports, for the initialization procedure here. |
| 2579 | */ |
| 2580 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { |
| 2581 | dev_warn(dev->ctrl.device, |
| 2582 | "failed to mark controller CONNECTING\n"); |
Minwoo Im | cee6c26 | 2019-06-09 03:35:20 +0900 | [diff] [blame] | 2583 | result = -EBUSY; |
Keith Busch | 5c959d7 | 2019-01-23 18:46:11 -0700 | [diff] [blame] | 2584 | goto out; |
| 2585 | } |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2586 | |
Christoph Hellwig | ce4541f | 2015-10-16 07:58:46 +0200 | [diff] [blame] | 2587 | result = nvme_init_identify(&dev->ctrl); |
| 2588 | if (result) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2589 | goto out; |
Christoph Hellwig | ce4541f | 2015-10-16 07:58:46 +0200 | [diff] [blame] | 2590 | |
Scott Bauer | e286bcf | 2017-02-22 10:15:07 -0700 | [diff] [blame] | 2591 | if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { |
| 2592 | if (!dev->ctrl.opal_dev) |
| 2593 | dev->ctrl.opal_dev = |
| 2594 | init_opal_dev(&dev->ctrl, &nvme_sec_submit); |
| 2595 | else if (was_suspend) |
| 2596 | opal_unlock_from_suspend(dev->ctrl.opal_dev); |
| 2597 | } else { |
| 2598 | free_opal_dev(dev->ctrl.opal_dev); |
| 2599 | dev->ctrl.opal_dev = NULL; |
Christoph Hellwig | 4f1244c | 2017-02-17 13:59:39 +0100 | [diff] [blame] | 2600 | } |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 2601 | |
Helen Koike | f9f38e3 | 2017-04-10 12:51:07 -0300 | [diff] [blame] | 2602 | if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { |
| 2603 | result = nvme_dbbuf_dma_alloc(dev); |
| 2604 | if (result) |
| 2605 | dev_warn(dev->dev, |
| 2606 | "unable to allocate dma for dbbuf\n"); |
| 2607 | } |
| 2608 | |
Christoph Hellwig | 9620cfb | 2017-09-06 12:19:57 +0200 | [diff] [blame] | 2609 | if (dev->ctrl.hmpre) { |
| 2610 | result = nvme_setup_host_mem(dev); |
| 2611 | if (result < 0) |
| 2612 | goto out; |
| 2613 | } |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2614 | |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2615 | result = nvme_setup_io_queues(dev); |
Keith Busch | badc34d | 2014-06-23 14:25:35 -0600 | [diff] [blame] | 2616 | if (result) |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2617 | goto out; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2618 | |
Keith Busch | 21f033f | 2016-04-12 11:13:11 -0600 | [diff] [blame] | 2619 | /* |
Christoph Hellwig | 2659e57 | 2015-10-02 18:51:31 +0200 | [diff] [blame] | 2620 | * Keep the controller around but remove all namespaces if we don't have |
| 2621 | * any working I/O queue. |
| 2622 | */ |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2623 | if (dev->online_queues < 2) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 2624 | dev_warn(dev->ctrl.device, "IO queues not created\n"); |
Keith Busch | 3b24774 | 2016-04-27 15:51:18 -0600 | [diff] [blame] | 2625 | nvme_kill_queues(&dev->ctrl); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2626 | nvme_remove_namespaces(&dev->ctrl); |
Keith Busch | 770597e | 2019-09-05 07:52:33 -0600 | [diff] [blame] | 2627 | nvme_free_tagset(dev); |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2628 | } else { |
Keith Busch | 2564626 | 2016-01-04 09:10:57 -0700 | [diff] [blame] | 2629 | nvme_start_queues(&dev->ctrl); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2630 | nvme_wait_freeze(&dev->ctrl); |
Keith Busch | 5d02a5c | 2019-09-03 09:22:24 -0600 | [diff] [blame] | 2631 | nvme_dev_add(dev); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2632 | nvme_unfreeze(&dev->ctrl); |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2633 | } |
| 2634 | |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2635 | /* |
 | 2636 | * If only the admin queue is live, keep it for further investigation |
 | 2637 | * or recovery. |
| 2638 | */ |
Keith Busch | 5d02a5c | 2019-09-03 09:22:24 -0600 | [diff] [blame] | 2639 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { |
Jianchao Wang | 2b1b7e7 | 2018-01-06 08:01:58 +0800 | [diff] [blame] | 2640 | dev_warn(dev->ctrl.device, |
Keith Busch | 5d02a5c | 2019-09-03 09:22:24 -0600 | [diff] [blame] | 2641 | "failed to mark controller live state\n"); |
Chaitanya Kulkarni | e71afda | 2019-06-08 13:01:02 -0700 | [diff] [blame] | 2642 | result = -ENODEV; |
Christoph Hellwig | bb8d261 | 2016-04-26 13:51:57 +0200 | [diff] [blame] | 2643 | goto out; |
| 2644 | } |
Christoph Hellwig | 92911a5 | 2016-04-26 13:51:58 +0200 | [diff] [blame] | 2645 | |
Sagi Grimberg | d09f2b4 | 2017-07-02 10:56:43 +0300 | [diff] [blame] | 2646 | nvme_start_ctrl(&dev->ctrl); |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2647 | return; |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2648 | |
Keith Busch | 4726bcf | 2019-02-11 09:23:50 -0700 | [diff] [blame] | 2649 | out_unlock: |
| 2650 | mutex_unlock(&dev->shutdown_lock); |
Christoph Hellwig | 3cf519b | 2015-10-03 09:49:23 +0200 | [diff] [blame] | 2651 | out: |
Chaitanya Kulkarni | 7c1ce40 | 2019-06-08 13:16:32 -0700 | [diff] [blame] | 2652 | if (result) |
| 2653 | dev_warn(dev->ctrl.device, |
| 2654 | "Removing after probe failure status: %d\n", result); |
| 2655 | nvme_remove_dead_ctrl(dev); |
Keith Busch | f0b5073 | 2013-07-15 15:02:21 -0600 | [diff] [blame] | 2656 | } |
| 2657 | |
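/*
 * Runs from nvme_wq to unbind the driver from a dead controller.
 * device_release_driver() invokes nvme_remove() under the device lock;
 * the reference taken in nvme_remove_dead_ctrl() is dropped afterwards.
 */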
Christoph Hellwig | 5c8809e | 2015-11-26 12:35:49 +0100 | [diff] [blame] | 2658 | static void nvme_remove_dead_ctrl_work(struct work_struct *work) |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2659 | { |
Christoph Hellwig | 5c8809e | 2015-11-26 12:35:49 +0100 | [diff] [blame] | 2660 | struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2661 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2662 | |
| 2663 | if (pci_get_drvdata(pdev)) |
Keith Busch | 921920a | 2016-03-28 16:03:21 -0600 | [diff] [blame] | 2664 | device_release_driver(&pdev->dev); |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2665 | nvme_put_ctrl(&dev->ctrl); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2666 | } |
| 2667 | |
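/* MMIO register accessors wired into nvme_pci_ctrl_ops below. */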
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2668 | static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) |
Keith Busch | 4cc0652 | 2015-06-05 10:30:08 -0600 | [diff] [blame] | 2669 | { |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2670 | *val = readl(to_nvme_dev(ctrl)->bar + off); |
| 2671 | return 0; |
Keith Busch | 4cc0652 | 2015-06-05 10:30:08 -0600 | [diff] [blame] | 2672 | } |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2673 | |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 2674 | static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) |
| 2675 | { |
| 2676 | writel(val, to_nvme_dev(ctrl)->bar + off); |
| 2677 | return 0; |
| 2678 | } |
| 2679 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 2680 | static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) |
| 2681 | { |
Ard Biesheuvel | 3a8ecc9 | 2019-10-03 13:57:29 +0200 | [diff] [blame] | 2682 | *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 2683 | return 0; |
| 2684 | } |
| 2685 | |
Keith Busch | 97c1222 | 2018-03-08 14:50:32 -0700 | [diff] [blame] | 2686 | static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) |
| 2687 | { |
| 2688 | struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); |
| 2689 | |
| 2690 | return snprintf(buf, size, "%s", dev_name(&pdev->dev)); |
| 2691 | } |
| 2692 | |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2693 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 2694 | .name = "pcie", |
Sagi Grimberg | e439bb1 | 2016-02-10 10:03:29 -0800 | [diff] [blame] | 2695 | .module = THIS_MODULE, |
Logan Gunthorpe | e0596ab | 2018-10-04 15:27:44 -0600 | [diff] [blame] | 2696 | .flags = NVME_F_METADATA_SUPPORTED | |
| 2697 | NVME_F_PCI_P2PDMA, |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2698 | .reg_read32 = nvme_pci_reg_read32, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 2699 | .reg_write32 = nvme_pci_reg_write32, |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 2700 | .reg_read64 = nvme_pci_reg_read64, |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2701 | .free_ctrl = nvme_pci_free_ctrl, |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2702 | .submit_async_event = nvme_pci_submit_async_event, |
Keith Busch | 97c1222 | 2018-03-08 14:50:32 -0700 | [diff] [blame] | 2703 | .get_address = nvme_pci_get_address, |
Christoph Hellwig | 1c63dc6 | 2015-11-26 10:06:56 +0100 | [diff] [blame] | 2704 | }; |
Keith Busch | 4cc0652 | 2015-06-05 10:30:08 -0600 | [diff] [blame] | 2705 | |
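/*
 * Claim the PCI memory regions and map enough of BAR 0 to cover the
 * admin queue doorbells.
 */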
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2706 | static int nvme_dev_map(struct nvme_dev *dev) |
| 2707 | { |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2708 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 2709 | |
Johannes Thumshirn | a1f447b | 2016-06-07 09:44:02 +0200 | [diff] [blame] | 2710 | if (pci_request_mem_regions(pdev, "nvme")) |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2711 | return -ENODEV; |
| 2712 | |
Xu Yu | 97f6ef6 | 2017-05-24 16:39:55 +0800 | [diff] [blame] | 2713 | if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2714 | goto release; |
| 2715 | |
Max Gurtovoy | 9fa196e | 2016-12-19 16:18:24 +0200 | [diff] [blame] | 2716 | return 0; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2717 | release: |
Max Gurtovoy | 9fa196e | 2016-12-19 16:18:24 +0200 | [diff] [blame] | 2718 | pci_release_mem_regions(pdev); |
| 2719 | return -ENODEV; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2720 | } |
| 2721 | |
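/*
 * Some quirks depend on the platform as well as the device, so key
 * them off DMI data here rather than off the PCI ID table alone.
 */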
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2722 | static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2723 | { |
| 2724 | if (pdev->vendor == 0x144d && pdev->device == 0xa802) { |
| 2725 | /* |
| 2726 | * Several Samsung devices seem to drop off the PCIe bus |
 | 2727 | * randomly when APST is on and the deepest sleep state is used. |
| 2728 | * This has been observed on a Samsung "SM951 NVMe SAMSUNG |
| 2729 | * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD |
| 2730 | * 950 PRO 256GB", but it seems to be restricted to two Dell |
| 2731 | * laptops. |
| 2732 | */ |
| 2733 | if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && |
| 2734 | (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || |
| 2735 | dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) |
| 2736 | return NVME_QUIRK_NO_DEEPEST_PS; |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2737 | } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { |
| 2738 | /* |
| 2739 | * Samsung SSD 960 EVO drops off the PCIe bus after system |
Jarosław Janik | 467c77d4 | 2018-03-11 19:51:56 +0100 | [diff] [blame] | 2740 | * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as |
 | 2741 | * within a few minutes after bootup on a Coffee Lake board - |
 | 2742 | * ASUS PRIME Z370-A. |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2743 | */ |
| 2744 | if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && |
Jarosław Janik | 467c77d4 | 2018-03-11 19:51:56 +0100 | [diff] [blame] | 2745 | (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || |
| 2746 | dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2747 | return NVME_QUIRK_NO_APST; |
Shyjumon N | 1fae37a | 2020-02-06 13:17:25 -0700 | [diff] [blame] | 2748 | } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || |
| 2749 | pdev->device == 0xa808 || pdev->device == 0xa809)) || |
| 2750 | (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { |
| 2751 | /* |
 | 2752 | * Force the use of host managed nvme power settings for |
 | 2753 | * lowest idle power with quick resume latency on |
 | 2754 | * Samsung and Toshiba SSDs, based on suspend behavior |
 | 2755 | * observed on a Coffee Lake board (LENOVO C640). |
| 2756 | */ |
| 2757 | if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && |
| 2758 | dmi_match(DMI_BOARD_NAME, "LNVNB161216")) |
| 2759 | return NVME_QUIRK_SIMPLE_SUSPEND; |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2760 | } |
| 2761 | |
| 2762 | return 0; |
| 2763 | } |
| 2764 | |
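/*
 * Tail end of probe, run asynchronously: wait for the initial reset
 * and namespace scan to finish, then drop the reference taken in
 * nvme_probe().
 */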
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2765 | static void nvme_async_probe(void *data, async_cookie_t cookie) |
| 2766 | { |
| 2767 | struct nvme_dev *dev = data; |
Keith Busch | 80f513b | 2018-05-07 08:30:24 -0600 | [diff] [blame] | 2768 | |
Keith Busch | bd46a90 | 2019-07-29 16:34:52 -0600 | [diff] [blame] | 2769 | flush_work(&dev->ctrl.reset_work); |
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2770 | flush_work(&dev->ctrl.scan_work); |
Keith Busch | 80f513b | 2018-05-07 08:30:24 -0600 | [diff] [blame] | 2771 | nvme_put_ctrl(&dev->ctrl); |
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2772 | } |
| 2773 | |
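/*
 * Allocate per-controller state on the device's NUMA node, map the
 * BAR, set up the PRP pools and iod mempool, register with the core,
 * and schedule the initial reset that brings the controller up.
 */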
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 2774 | static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2775 | { |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2776 | int node, result = -ENOMEM; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2777 | struct nvme_dev *dev; |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2778 | unsigned long quirks = id->driver_data; |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2779 | size_t alloc_size; |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2780 | |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2781 | node = dev_to_node(&pdev->dev); |
| 2782 | if (node == NUMA_NO_NODE) |
Masayoshi Mizuma | 2fa8435 | 2016-06-20 09:33:17 +0900 | [diff] [blame] | 2783 | set_dev_node(&pdev->dev, first_memory_node); |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2784 | |
| 2785 | dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2786 | if (!dev) |
| 2787 | return -ENOMEM; |
Sagi Grimberg | 147b27e | 2018-01-14 12:39:01 +0200 | [diff] [blame] | 2788 | |
Jens Axboe | 3b6592f | 2018-10-31 08:36:31 -0600 | [diff] [blame] | 2789 | dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue), |
| 2790 | GFP_KERNEL, node); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2791 | if (!dev->queues) |
| 2792 | goto free; |
| 2793 | |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2794 | dev->dev = get_device(&pdev->dev); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2795 | pci_set_drvdata(pdev, dev); |
Keith Busch | b3fffde | 2015-02-03 11:21:42 -0700 | [diff] [blame] | 2796 | |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2797 | result = nvme_dev_map(dev); |
| 2798 | if (result) |
Christophe JAILLET | b00c9b7 | 2017-07-16 10:39:03 +0200 | [diff] [blame] | 2799 | goto put_pci; |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2800 | |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2801 | INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); |
Christoph Hellwig | 5c8809e | 2015-11-26 12:35:49 +0100 | [diff] [blame] | 2802 | INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); |
Keith Busch | 77bf25e | 2015-11-26 12:21:29 +0100 | [diff] [blame] | 2803 | mutex_init(&dev->shutdown_lock); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2804 | |
| 2805 | result = nvme_setup_prp_pools(dev); |
| 2806 | if (result) |
Christophe JAILLET | b00c9b7 | 2017-07-16 10:39:03 +0200 | [diff] [blame] | 2807 | goto unmap; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2808 | |
Kai-Heng Feng | 8427bbc | 2017-11-09 01:12:03 -0500 | [diff] [blame] | 2809 | quirks |= check_vendor_combination_bug(pdev); |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 2810 | |
Jens Axboe | 943e942 | 2018-06-21 09:49:37 -0600 | [diff] [blame] | 2811 | /* |
| 2812 | * Double check that our mempool alloc size will cover the biggest |
| 2813 | * command we support. |
| 2814 | */ |
| 2815 | alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ, |
| 2816 | NVME_MAX_SEGS, true); |
| 2817 | WARN_ON_ONCE(alloc_size > PAGE_SIZE); |
| 2818 | |
| 2819 | dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, |
| 2820 | mempool_kfree, |
| 2821 | (void *) alloc_size, |
| 2822 | GFP_KERNEL, node); |
| 2823 | if (!dev->iod_mempool) { |
| 2824 | result = -ENOMEM; |
| 2825 | goto release_pools; |
| 2826 | } |
| 2827 | |
Keith Busch | b6e44b4 | 2018-07-11 16:44:44 -0600 | [diff] [blame] | 2828 | result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, |
| 2829 | quirks); |
| 2830 | if (result) |
| 2831 | goto release_mempool; |
| 2832 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 2833 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
| 2834 | |
Keith Busch | bd46a90 | 2019-07-29 16:34:52 -0600 | [diff] [blame] | 2835 | nvme_reset_ctrl(&dev->ctrl); |
Keith Busch | 80f513b | 2018-05-07 08:30:24 -0600 | [diff] [blame] | 2836 | nvme_get_ctrl(&dev->ctrl); |
Keith Busch | 18119775 | 2018-04-27 13:42:52 -0600 | [diff] [blame] | 2837 | async_schedule(nvme_async_probe, dev); |
Sagi Grimberg | 4caff8f | 2017-12-31 14:01:19 +0200 | [diff] [blame] | 2838 | |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2839 | return 0; |
| 2840 | |
Keith Busch | b6e44b4 | 2018-07-11 16:44:44 -0600 | [diff] [blame] | 2841 | release_mempool: |
| 2842 | mempool_destroy(dev->iod_mempool); |
Keith Busch | 0877cb0 | 2013-07-15 15:02:19 -0600 | [diff] [blame] | 2843 | release_pools: |
Matthew Wilcox | 091b609 | 2011-02-10 09:56:01 -0500 | [diff] [blame] | 2844 | nvme_release_prp_pools(dev); |
Christophe JAILLET | b00c9b7 | 2017-07-16 10:39:03 +0200 | [diff] [blame] | 2845 | unmap: |
| 2846 | nvme_dev_unmap(dev); |
Keith Busch | a96d4f5 | 2014-08-19 19:15:59 -0600 | [diff] [blame] | 2847 | put_pci: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2848 | put_device(dev->dev); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2849 | free: |
| 2850 | kfree(dev->queues); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2851 | kfree(dev); |
| 2852 | return result; |
| 2853 | } |
| 2854 | |
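/*
 * PCI reset hooks: quiesce the controller before a function-level
 * reset and schedule a controller reset once it is done.
 */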
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 2855 | static void nvme_reset_prepare(struct pci_dev *pdev) |
Keith Busch | f0d54a5 | 2014-05-02 10:40:43 -0600 | [diff] [blame] | 2856 | { |
Keith Busch | a673947 | 2014-06-23 16:03:21 -0600 | [diff] [blame] | 2857 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2858 | |
| 2859 | /* |
| 2860 | * We don't need to check the return value from waiting for the reset |
| 2861 | * state as pci_dev device lock is held, making it impossible to race |
| 2862 | * with ->remove(). |
| 2863 | */ |
| 2864 | nvme_disable_prepare_reset(dev, false); |
| 2865 | nvme_sync_queues(&dev->ctrl); |
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 2866 | } |
Keith Busch | f0d54a5 | 2014-05-02 10:40:43 -0600 | [diff] [blame] | 2867 | |
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 2868 | static void nvme_reset_done(struct pci_dev *pdev) |
| 2869 | { |
Linus Torvalds | f263fbb | 2017-07-08 15:51:57 -0700 | [diff] [blame] | 2870 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2871 | |
| 2872 | if (!nvme_try_sched_reset(&dev->ctrl)) |
| 2873 | flush_work(&dev->ctrl.reset_work); |
Keith Busch | f0d54a5 | 2014-05-02 10:40:43 -0600 | [diff] [blame] | 2874 | } |
| 2875 | |
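/* Cleanly shut the controller down on system shutdown or reboot. */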
Keith Busch | 09ece14 | 2014-01-27 11:29:40 -0500 | [diff] [blame] | 2876 | static void nvme_shutdown(struct pci_dev *pdev) |
| 2877 | { |
| 2878 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2879 | nvme_disable_prepare_reset(dev, true); |
Keith Busch | 09ece14 | 2014-01-27 11:29:40 -0500 | [diff] [blame] | 2880 | } |
| 2881 | |
Keith Busch | f58944e | 2016-02-24 09:15:55 -0700 | [diff] [blame] | 2882 | /* |
| 2883 | * The driver's remove may be called on a device in a partially initialized |
| 2884 | * state. This function must not have any dependencies on the device state in |
| 2885 | * order to proceed. |
| 2886 | */ |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 2887 | static void nvme_remove(struct pci_dev *pdev) |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2888 | { |
| 2889 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2890 | |
Christoph Hellwig | bb8d261 | 2016-04-26 13:51:57 +0200 | [diff] [blame] | 2891 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2892 | pci_set_drvdata(pdev, NULL); |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2893 | |
Keith Busch | 6db28ed | 2017-02-10 18:15:49 -0500 | [diff] [blame] | 2894 | if (!pci_device_is_present(pdev)) { |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2895 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); |
Keith Busch | 1d39e69 | 2018-06-06 08:13:08 -0600 | [diff] [blame] | 2896 | nvme_dev_disable(dev, true); |
Keith Busch | cb4bfda | 2018-10-15 10:19:06 -0600 | [diff] [blame] | 2897 | nvme_dev_remove_admin(dev); |
Keith Busch | 6db28ed | 2017-02-10 18:15:49 -0500 | [diff] [blame] | 2898 | } |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2899 | |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 2900 | flush_work(&dev->ctrl.reset_work); |
Sagi Grimberg | d09f2b4 | 2017-07-02 10:56:43 +0300 | [diff] [blame] | 2901 | nvme_stop_ctrl(&dev->ctrl); |
| 2902 | nvme_remove_namespaces(&dev->ctrl); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 2903 | nvme_dev_disable(dev, true); |
Keith Busch | 9fe5c59 | 2018-10-31 13:15:29 -0600 | [diff] [blame] | 2904 | nvme_release_cmb(dev); |
Christoph Hellwig | 87ad72a | 2017-05-12 17:02:58 +0200 | [diff] [blame] | 2905 | nvme_free_host_mem(dev); |
Matias Bjørling | a4aea56 | 2014-11-04 08:20:14 -0700 | [diff] [blame] | 2906 | nvme_dev_remove_admin(dev); |
| 2907 | nvme_free_queues(dev, 0); |
Sagi Grimberg | d09f2b4 | 2017-07-02 10:56:43 +0300 | [diff] [blame] | 2908 | nvme_uninit_ctrl(&dev->ctrl); |
Keith Busch | 9a6b945 | 2013-12-10 13:10:36 -0700 | [diff] [blame] | 2909 | nvme_release_prp_pools(dev); |
Keith Busch | b00a726 | 2016-02-24 09:15:52 -0700 | [diff] [blame] | 2910 | nvme_dev_unmap(dev); |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 2911 | nvme_put_ctrl(&dev->ctrl); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 2912 | } |
| 2913 | |
Jingoo Han | 671a601 | 2014-02-13 11:19:14 +0900 | [diff] [blame] | 2914 | #ifdef CONFIG_PM_SLEEP |
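/* Wrappers for the NVMe Power Management get/set features command. */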
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2915 | static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) |
| 2916 | { |
| 2917 | return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); |
| 2918 | } |
| 2919 | |
| 2920 | static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) |
| 2921 | { |
| 2922 | return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); |
| 2923 | } |
| 2924 | |
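/*
 * Restore the power state saved at suspend time; if none was saved or
 * restoring fails, fall back to scheduling a full controller reset.
 */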
| 2925 | static int nvme_resume(struct device *dev) |
| 2926 | { |
| 2927 | struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); |
| 2928 | struct nvme_ctrl *ctrl = &ndev->ctrl; |
| 2929 | |
Rafael J. Wysocki | 4eaefe8 | 2019-08-08 23:58:38 +0200 | [diff] [blame] | 2930 | if (ndev->last_ps == U32_MAX || |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2931 | nvme_set_power_state(ctrl, ndev->last_ps) != 0) |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2932 | return nvme_try_sched_reset(&ndev->ctrl); |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2933 | return 0; |
| 2934 | } |
| 2935 | |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 2936 | static int nvme_suspend(struct device *dev) |
| 2937 | { |
| 2938 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2939 | struct nvme_dev *ndev = pci_get_drvdata(pdev); |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2940 | struct nvme_ctrl *ctrl = &ndev->ctrl; |
| 2941 | int ret = -EBUSY; |
| 2942 | |
Rafael J. Wysocki | 4eaefe8 | 2019-08-08 23:58:38 +0200 | [diff] [blame] | 2943 | ndev->last_ps = U32_MAX; |
| 2944 | |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2945 | /* |
 | 2946 | * The platform does not remove power for a kernel managed suspend, so |
| 2947 | * use host managed nvme power settings for lowest idle power if |
| 2948 | * possible. This should have quicker resume latency than a full device |
| 2949 | * shutdown. But if the firmware is involved after the suspend or the |
| 2950 | * device does not support any non-default power states, shut down the |
| 2951 | * device fully. |
Rafael J. Wysocki | 4eaefe8 | 2019-08-08 23:58:38 +0200 | [diff] [blame] | 2952 | * |
| 2953 | * If ASPM is not enabled for the device, shut down the device and allow |
| 2954 | * the PCI bus layer to put it into D3 in order to take the PCIe link |
| 2955 | * down, so as to allow the platform to achieve its minimum low-power |
| 2956 | * state (which may not be possible if the link is up). |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2957 | */ |
Rafael J. Wysocki | 4eaefe8 | 2019-08-08 23:58:38 +0200 | [diff] [blame] | 2958 | if (pm_suspend_via_firmware() || !ctrl->npss || |
Mario Limonciello | cb32de1 | 2019-08-16 15:16:19 -0500 | [diff] [blame] | 2959 | !pcie_aspm_enabled(pdev) || |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2960 | (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) |
| 2961 | return nvme_disable_prepare_reset(ndev, true); |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2962 | |
| 2963 | nvme_start_freeze(ctrl); |
| 2964 | nvme_wait_freeze(ctrl); |
| 2965 | nvme_sync_queues(ctrl); |
| 2966 | |
Keith Busch | 5d02a5c | 2019-09-03 09:22:24 -0600 | [diff] [blame] | 2967 | if (ctrl->state != NVME_CTRL_LIVE) |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2968 | goto unfreeze; |
| 2969 | |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2970 | ret = nvme_get_power_state(ctrl, &ndev->last_ps); |
| 2971 | if (ret < 0) |
| 2972 | goto unfreeze; |
| 2973 | |
Mario Limonciello | 7cbb5c6 | 2019-09-18 13:15:55 -0500 | [diff] [blame] | 2974 | /* |
| 2975 | * A saved state prevents pci pm from generically controlling the |
| 2976 | * device's power. If we're using protocol specific settings, we don't |
| 2977 | * want pci interfering. |
| 2978 | */ |
| 2979 | pci_save_state(pdev); |
| 2980 | |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2981 | ret = nvme_set_power_state(ctrl, ctrl->npss); |
| 2982 | if (ret < 0) |
| 2983 | goto unfreeze; |
| 2984 | |
| 2985 | if (ret) { |
Mario Limonciello | 7cbb5c6 | 2019-09-18 13:15:55 -0500 | [diff] [blame] | 2986 | /* discard the saved state */ |
| 2987 | pci_load_saved_state(pdev, NULL); |
| 2988 | |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2989 | /* |
| 2990 | * Clearing npss forces a controller reset on resume. The |
Geert Uytterhoeven | 05d3046 | 2019-10-24 17:24:00 +0200 | [diff] [blame] | 2991 | * correct value will be rediscovered then. |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2992 | */ |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 2993 | ret = nvme_disable_prepare_reset(ndev, true); |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2994 | ctrl->npss = 0; |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2995 | } |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 2996 | unfreeze: |
| 2997 | nvme_unfreeze(ctrl); |
| 2998 | return ret; |
| 2999 | } |
| 3000 | |
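/*
 * The "simple" variants always go through a full shutdown/reset cycle;
 * they back the hibernation transitions (freeze/thaw/poweroff/restore)
 * in nvme_dev_pm_ops below.
 */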
| 3001 | static int nvme_simple_suspend(struct device *dev) |
| 3002 | { |
| 3003 | struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 3004 | return nvme_disable_prepare_reset(ndev, true); |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 3005 | } |
| 3006 | |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 3007 | static int nvme_simple_resume(struct device *dev) |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 3008 | { |
| 3009 | struct pci_dev *pdev = to_pci_dev(dev); |
| 3010 | struct nvme_dev *ndev = pci_get_drvdata(pdev); |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 3011 | |
Keith Busch | c1ac9a4b | 2019-09-04 10:06:11 -0600 | [diff] [blame] | 3012 | return nvme_try_sched_reset(&ndev->ctrl); |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 3013 | } |
| 3014 | |
YueHaibing | 2177422 | 2019-06-26 10:09:02 +0800 | [diff] [blame] | 3015 | static const struct dev_pm_ops nvme_dev_pm_ops = { |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 3016 | .suspend = nvme_suspend, |
| 3017 | .resume = nvme_resume, |
| 3018 | .freeze = nvme_simple_suspend, |
| 3019 | .thaw = nvme_simple_resume, |
| 3020 | .poweroff = nvme_simple_suspend, |
| 3021 | .restore = nvme_simple_resume, |
| 3022 | }; |
| 3023 | #endif /* CONFIG_PM_SLEEP */ |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3024 | |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 3025 | static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, |
| 3026 | pci_channel_state_t state) |
| 3027 | { |
| 3028 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
| 3029 | |
| 3030 | /* |
| 3031 | * A frozen channel requires a reset. When detected, this method will |
 | 3032 | * shut down the controller to quiesce. The controller will be restarted |
| 3033 | * after the slot reset through driver's slot_reset callback. |
| 3034 | */ |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 3035 | switch (state) { |
| 3036 | case pci_channel_io_normal: |
| 3037 | return PCI_ERS_RESULT_CAN_RECOVER; |
| 3038 | case pci_channel_io_frozen: |
Keith Busch | d011fb3 | 2016-04-04 15:07:41 -0600 | [diff] [blame] | 3039 | dev_warn(dev->ctrl.device, |
| 3040 | "frozen state error detected, reset controller\n"); |
Keith Busch | a5cdb68 | 2016-01-12 14:41:18 -0700 | [diff] [blame] | 3041 | nvme_dev_disable(dev, false); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 3042 | return PCI_ERS_RESULT_NEED_RESET; |
| 3043 | case pci_channel_io_perm_failure: |
Keith Busch | d011fb3 | 2016-04-04 15:07:41 -0600 | [diff] [blame] | 3044 | dev_warn(dev->ctrl.device, |
| 3045 | "failure state error detected, request disconnect\n"); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 3046 | return PCI_ERS_RESULT_DISCONNECT; |
| 3047 | } |
| 3048 | return PCI_ERS_RESULT_NEED_RESET; |
| 3049 | } |
| 3050 | |
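/*
 * Called once the slot has been reset: restore PCI config space and
 * schedule a controller reset to bring the device back up.
 */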
| 3051 | static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) |
| 3052 | { |
| 3053 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
| 3054 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 3055 | dev_info(dev->ctrl.device, "restart after slot reset\n"); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 3056 | pci_restore_state(pdev); |
Christoph Hellwig | d86c4d8 | 2017-06-15 15:41:08 +0200 | [diff] [blame] | 3057 | nvme_reset_ctrl(&dev->ctrl); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 3058 | return PCI_ERS_RESULT_RECOVERED; |
| 3059 | } |
| 3060 | |
| 3061 | static void nvme_error_resume(struct pci_dev *pdev) |
| 3062 | { |
Keith Busch | 72cd4cc | 2018-05-24 16:16:04 -0600 | [diff] [blame] | 3063 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
| 3064 | |
| 3065 | flush_work(&dev->ctrl.reset_work); |
Keith Busch | a0a3408 | 2015-12-07 15:30:31 -0700 | [diff] [blame] | 3066 | } |
| 3067 | |
Stephen Hemminger | 1d35203 | 2012-09-07 09:33:17 -0700 | [diff] [blame] | 3068 | static const struct pci_error_handlers nvme_err_handler = { |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3069 | .error_detected = nvme_error_detected, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3070 | .slot_reset = nvme_slot_reset, |
| 3071 | .resume = nvme_error_resume, |
Christoph Hellwig | 775755e | 2017-06-01 13:10:38 +0200 | [diff] [blame] | 3072 | .reset_prepare = nvme_reset_prepare, |
| 3073 | .reset_done = nvme_reset_done, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3074 | }; |
| 3075 | |
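/* PCI IDs served by this driver, with per-device quirks. */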
Matthew Wilcox | 6eb0d69 | 2014-03-24 10:11:22 -0400 | [diff] [blame] | 3076 | static const struct pci_device_id nvme_id_table[] = { |
Christoph Hellwig | 106198e | 2015-11-26 10:07:41 +0100 | [diff] [blame] | 3077 | { PCI_VDEVICE(INTEL, 0x0953), |
Keith Busch | 08095e7 | 2016-03-04 13:15:17 -0700 | [diff] [blame] | 3078 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
Christoph Hellwig | e850fd1 | 2017-04-05 19:21:13 +0200 | [diff] [blame] | 3079 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
Keith Busch | 99466e7 | 2016-05-02 15:14:24 -0600 | [diff] [blame] | 3080 | { PCI_VDEVICE(INTEL, 0x0a53), |
| 3081 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
Christoph Hellwig | e850fd1 | 2017-04-05 19:21:13 +0200 | [diff] [blame] | 3082 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
Keith Busch | 99466e7 | 2016-05-02 15:14:24 -0600 | [diff] [blame] | 3083 | { PCI_VDEVICE(INTEL, 0x0a54), |
| 3084 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
Christoph Hellwig | e850fd1 | 2017-04-05 19:21:13 +0200 | [diff] [blame] | 3085 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
David Wayne Fugate | f99cb7af | 2017-07-10 12:39:59 -0600 | [diff] [blame] | 3086 | { PCI_VDEVICE(INTEL, 0x0a55), |
| 3087 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
| 3088 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
Andy Lutomirski | 50af47d | 2017-05-24 15:06:31 -0700 | [diff] [blame] | 3089 | { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ |
Jens Axboe | 9abd68e | 2018-05-08 10:25:15 -0600 | [diff] [blame] | 3090 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS | |
Akinobu Mita | 6c6aa2f | 2019-11-15 00:40:01 +0900 | [diff] [blame] | 3091 | NVME_QUIRK_MEDIUM_PRIO_SQ | |
| 3092 | NVME_QUIRK_NO_TEMP_THRESH_CHANGE }, |
James Dingwall | 6299358 | 2019-01-08 10:20:51 -0700 | [diff] [blame] | 3093 | { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ |
| 3094 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 3095 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ |
Christoph Hellwig | 7b210e4 | 2019-03-13 18:55:05 +0100 | [diff] [blame] | 3096 | .driver_data = NVME_QUIRK_IDENTIFY_CNS | |
| 3097 | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
Micah Parrish | 0302ae6 | 2018-04-12 13:25:25 -0600 | [diff] [blame] | 3098 | { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ |
| 3099 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Guilherme G. Piccoli | 54adc01 | 2016-06-14 18:22:41 -0300 | [diff] [blame] | 3100 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ |
| 3101 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Jeff Lien | 8c97eec | 2017-11-21 10:44:37 -0600 | [diff] [blame] | 3102 | { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ |
| 3103 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Wenbo Wang | 015282c | 2016-09-08 12:12:11 -0400 | [diff] [blame] | 3104 | { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ |
| 3105 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Martin K. Petersen | d554b5e | 2017-06-27 22:27:57 -0400 | [diff] [blame] | 3106 | { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ |
| 3107 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
| 3108 | { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ |
| 3109 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
Christoph Hellwig | 608cc4b | 2017-09-06 11:45:24 +0200 | [diff] [blame] | 3110 | { PCI_DEVICE(0x1d1d, 0x1f1f), /* LightNVM qemu device */ |
| 3111 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
| 3112 | { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ |
| 3113 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
Wei Xu | ea48e87 | 2018-04-26 14:59:19 -0600 | [diff] [blame] | 3114 | { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ |
| 3115 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
Misha Nasledov | 08b903b | 2019-07-15 00:11:49 -0700 | [diff] [blame] | 3116 | { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ |
| 3117 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
Gabriel Craciunescu | f03e42c | 2019-09-23 20:22:56 +0200 | [diff] [blame] | 3118 | { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ |
| 3119 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS | |
| 3120 | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3121 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
Andy Shevchenko | 98f7b86 | 2020-02-12 12:32:18 +0200 | [diff] [blame] | 3122 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), |
| 3123 | .driver_data = NVME_QUIRK_SINGLE_VECTOR }, |
Daniel Roschka | 124298b | 2017-02-22 15:17:29 -0700 | [diff] [blame] | 3124 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, |
Benjamin Herrenschmidt | 6634133 | 2019-08-07 17:51:21 +1000 | [diff] [blame] | 3125 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), |
| 3126 | .driver_data = NVME_QUIRK_SINGLE_VECTOR | |
Benjamin Herrenschmidt | d38e9f0 | 2019-08-07 17:51:22 +1000 | [diff] [blame] | 3127 | NVME_QUIRK_128_BYTES_SQES | |
| 3128 | NVME_QUIRK_SHARED_TAGS }, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3129 | { 0, } |
| 3130 | }; |
| 3131 | MODULE_DEVICE_TABLE(pci, nvme_id_table); |
| 3132 | |
| 3133 | static struct pci_driver nvme_driver = { |
| 3134 | .name = "nvme", |
| 3135 | .id_table = nvme_id_table, |
| 3136 | .probe = nvme_probe, |
Greg Kroah-Hartman | 8d85fce | 2012-12-21 15:13:49 -0800 | [diff] [blame] | 3137 | .remove = nvme_remove, |
Keith Busch | 09ece14 | 2014-01-27 11:29:40 -0500 | [diff] [blame] | 3138 | .shutdown = nvme_shutdown, |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 3139 | #ifdef CONFIG_PM_SLEEP |
Keith Busch | cd63894 | 2013-07-15 15:02:23 -0600 | [diff] [blame] | 3140 | .driver = { |
| 3141 | .pm = &nvme_dev_pm_ops, |
| 3142 | }, |
Keith Busch | d916b1b | 2019-05-23 09:27:35 -0600 | [diff] [blame] | 3143 | #endif |
Alexander Duyck | 74d986a | 2018-04-24 16:47:27 -0500 | [diff] [blame] | 3144 | .sriov_configure = pci_sriov_configure_simple, |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3145 | .err_handler = &nvme_err_handler, |
| 3146 | }; |
| 3147 | |
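/*
 * Verify fixed command layouts at build time, clamp the queue count
 * module parameters to the number of possible CPUs, and register the
 * PCI driver.
 */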
| 3148 | static int __init nvme_init(void) |
| 3149 | { |
Christoph Hellwig | 8110154 | 2019-04-30 11:36:52 -0400 | [diff] [blame] | 3150 | BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); |
| 3151 | BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); |
| 3152 | BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); |
Ming Lei | 612b728 | 2019-02-16 18:13:10 +0100 | [diff] [blame] | 3153 | BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); |
Keith Busch | 17c33167 | 2019-12-07 01:16:59 +0900 | [diff] [blame] | 3154 | |
| 3155 | write_queues = min(write_queues, num_possible_cpus()); |
| 3156 | poll_queues = min(poll_queues, num_possible_cpus()); |
Sagi Grimberg | 9a6327d | 2017-06-07 20:31:55 +0200 | [diff] [blame] | 3157 | return pci_register_driver(&nvme_driver); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3158 | } |
| 3159 | |
| 3160 | static void __exit nvme_exit(void) |
| 3161 | { |
| 3162 | pci_unregister_driver(&nvme_driver); |
Ming Lei | 03e0f3a | 2017-11-09 19:32:07 +0800 | [diff] [blame] | 3163 | flush_workqueue(nvme_wq); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3164 | } |
| 3165 | |
| 3166 | MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); |
| 3167 | MODULE_LICENSE("GPL"); |
Keith Busch | c78b4713 | 2014-11-21 15:16:32 -0700 | [diff] [blame] | 3168 | MODULE_VERSION("1.0"); |
Matthew Wilcox | b60503b | 2011-01-20 12:50:14 -0500 | [diff] [blame] | 3169 | module_init(nvme_init); |
| 3170 | module_exit(nvme_exit); |