/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)  (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)  (depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT      (5 * HZ)
#define ADMIN_TIMEOUT   (60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
        struct list_head node;
        struct nvme_queue **queues;
        u32 __iomem *dbs;
        struct pci_dev *pci_dev;
        struct dma_pool *prp_page_pool;
        struct dma_pool *prp_small_pool;
        int instance;
        int queue_count;
        int db_stride;
        u32 ctrl_config;
        struct msix_entry *entry;
        struct nvme_bar __iomem *bar;
        struct list_head namespaces;
        char serial[20];
        char model[40];
        char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
        struct list_head list;

        struct nvme_dev *dev;
        struct request_queue *queue;
        struct gendisk *disk;

        int ns_id;
        int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
        struct device *q_dmadev;
        struct nvme_dev *dev;
        spinlock_t q_lock;
        struct nvme_command *sq_cmds;
        volatile struct nvme_completion *cqes;
        dma_addr_t sq_dma_addr;
        dma_addr_t cq_dma_addr;
        wait_queue_head_t sq_full;
        wait_queue_t sq_cong_wait;
        struct bio_list sq_cong;
        u32 __iomem *q_db;
        u16 q_depth;
        u16 cq_vector;
        u16 sq_head;
        u16 sq_tail;
        u16 cq_head;
        u16 cq_phase;
        unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

struct nvme_cmd_info {
        unsigned long ctx;
        unsigned long timeout;
};

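/*
 * Per-command info is stored immediately after the command ID bitmap at the
 * end of struct nvme_queue, one nvme_cmd_info entry per possible command ID.
 */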
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
        return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
                                                        unsigned timeout)
{
        int depth = nvmeq->q_depth - 1;
        struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
        int cmdid;

        BUG_ON((unsigned long)ctx & 3);

        do {
                cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
                if (cmdid >= depth)
                        return -EBUSY;
        } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

        info[cmdid].ctx = (unsigned long)ctx | handler;
        info[cmdid].timeout = jiffies + timeout;
        return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
                                        int handler, unsigned timeout)
{
        int cmdid;
        wait_event_killable(nvmeq->sq_full,
                (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
        return (cmdid < 0) ? -EINTR : cmdid;
}

/*
 * If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work.  Consider using a special
 * CMD_CTX value instead, if that works for your situation.
 */
enum {
        sync_completion_id = 0,
        bio_completion_id,
};

/* Special values must be a multiple of 4, and less than 0x1000 */
#define CMD_CTX_BASE      (POISON_POINTER_DELTA + sync_completion_id)
#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID   (0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH     (0x318 + CMD_CTX_BASE)

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
        unsigned long data;
        struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

        if (cmdid >= nvmeq->q_depth)
                return CMD_CTX_INVALID;
        data = info[cmdid].ctx;
        info[cmdid].ctx = CMD_CTX_COMPLETED;
        clear_bit(cmdid, nvmeq->cmdid_data);
        wake_up(&nvmeq->sq_full);
        return data;
}

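/*
 * Mark a command ID as cancelled without freeing it; a completion that
 * arrives later finds CMD_CTX_CANCELLED and is ignored by the handler.
 */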
static unsigned long cancel_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
        unsigned long data;
        struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
        data = info[cmdid].ctx;
        info[cmdid].ctx = CMD_CTX_CANCELLED;
        return data;
}

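/*
 * get_nvmeq() returns the I/O queue assigned to the current CPU and leaves
 * preemption disabled (via get_cpu()); put_nvmeq() re-enables it.
 */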
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
        return ns->dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
        put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
        unsigned long flags;
        u16 tail;
        spin_lock_irqsave(&nvmeq->q_lock, flags);
        tail = nvmeq->sq_tail;
        memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
        if (++tail == nvmeq->q_depth)
                tail = 0;
        writel(tail, nvmeq->q_db);
        nvmeq->sq_tail = tail;
        spin_unlock_irqrestore(&nvmeq->q_lock, flags);

        return 0;
}

struct nvme_prps {
        int npages;             /* 0 means small pool in use */
        dma_addr_t first_dma;
        __le64 *list[0];
};

static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
{
        const int last_prp = PAGE_SIZE / 8 - 1;
        int i;
        dma_addr_t prp_dma;

        if (!prps)
                return;

        prp_dma = prps->first_dma;

        if (prps->npages == 0)
                dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
        for (i = 0; i < prps->npages; i++) {
                __le64 *prp_list = prps->list[i];
                dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
                dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
                prp_dma = next_prp_dma;
        }
        kfree(prps);
}

struct nvme_bio {
        struct bio *bio;
        int nents;
        struct nvme_prps *prps;
        struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
        return kzalloc(sizeof(struct nvme_bio) +
                        sizeof(struct scatterlist) * nseg, gfp);
}

static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
        nvme_free_prps(nvmeq->dev, nbio->prps);
        kfree(nbio);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
{
        struct nvme_bio *nbio = ctx;
        struct bio *bio = nbio->bio;
        u16 status = le16_to_cpup(&cqe->status) >> 1;

        dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
                        bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        free_nbio(nvmeq, nbio);
        if (status) {
                bio_endio(bio, -EIO);
        } else if (bio->bi_vcnt > bio->bi_idx) {
                if (bio_list_empty(&nvmeq->sq_cong))
                        add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
                bio_list_add(&nvmeq->sq_cong, bio);
                wake_up_process(nvme_thread);
        } else {
                bio_endio(bio, 0);
        }
}

/* length is in bytes.  gfp flags indicates whether we may sleep. */
static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
                                        struct nvme_common_command *cmd,
                                        struct scatterlist *sg, int *len,
                                        gfp_t gfp)
{
        struct dma_pool *pool;
        int length = *len;
        int dma_len = sg_dma_len(sg);
        u64 dma_addr = sg_dma_address(sg);
        int offset = offset_in_page(dma_addr);
        __le64 *prp_list;
        dma_addr_t prp_dma;
        int nprps, npages, i;
        struct nvme_prps *prps = NULL;

        cmd->prp1 = cpu_to_le64(dma_addr);
        length -= (PAGE_SIZE - offset);
        if (length <= 0)
                return prps;

        dma_len -= (PAGE_SIZE - offset);
        if (dma_len) {
                dma_addr += (PAGE_SIZE - offset);
        } else {
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }

        if (length <= PAGE_SIZE) {
                cmd->prp2 = cpu_to_le64(dma_addr);
                return prps;
        }

        nprps = DIV_ROUND_UP(length, PAGE_SIZE);
        npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
        prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
        if (!prps) {
                cmd->prp2 = cpu_to_le64(dma_addr);
                *len = (*len - length) + PAGE_SIZE;
                return prps;
        }

        if (nprps <= (256 / 8)) {
                pool = dev->prp_small_pool;
                prps->npages = 0;
        } else {
                pool = dev->prp_page_pool;
                prps->npages = 1;
        }

        prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
        if (!prp_list) {
                cmd->prp2 = cpu_to_le64(dma_addr);
                *len = (*len - length) + PAGE_SIZE;
                kfree(prps);
                return NULL;
        }
        prps->list[0] = prp_list;
        prps->first_dma = prp_dma;
        cmd->prp2 = cpu_to_le64(prp_dma);
        i = 0;
        for (;;) {
                if (i == PAGE_SIZE / 8) {
                        __le64 *old_prp_list = prp_list;
                        prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
                        if (!prp_list) {
                                *len = (*len - length);
                                return prps;
                        }
                        prps->list[prps->npages++] = prp_list;
                        prp_list[0] = old_prp_list[i - 1];
                        old_prp_list[i - 1] = cpu_to_le64(prp_dma);
                        i = 1;
                }
                prp_list[i++] = cpu_to_le64(dma_addr);
                dma_len -= PAGE_SIZE;
                dma_addr += PAGE_SIZE;
                length -= PAGE_SIZE;
                if (length <= 0)
                        break;
                if (dma_len > 0)
                        continue;
                BUG_ON(dma_len < 0);
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }

        return prps;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)   ((vec2)->bv_offset || \
                        (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

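/*
 * Build a scatterlist from the bio's vectors, merging physically contiguous
 * segments and stopping early at the first vector that is not virtually
 * mergeable with its predecessor.  Returns the number of bytes mapped, or
 * -ENOMEM if DMA mapping fails.
 */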
static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
                struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
        struct bio_vec *bvec, *bvprv = NULL;
        struct scatterlist *sg = NULL;
        int i, old_idx, length = 0, nsegs = 0;

        sg_init_table(nbio->sg, psegs);
        old_idx = bio->bi_idx;
        bio_for_each_segment(bvec, bio, i) {
                if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
                        sg->length += bvec->bv_len;
                } else {
                        if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
                                break;
                        sg = sg ? sg + 1 : nbio->sg;
                        sg_set_page(sg, bvec->bv_page, bvec->bv_len,
                                                        bvec->bv_offset);
                        nsegs++;
                }
                length += bvec->bv_len;
                bvprv = bvec;
        }
        bio->bi_idx = i;
        nbio->nents = nsegs;
        sg_mark_end(sg);
        if (dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir) == 0) {
                bio->bi_idx = old_idx;
                return -ENOMEM;
        }
        return length;
}

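/*
 * Queue a flush command for the namespace using an already-allocated command
 * ID.  nvme_submit_flush_data() allocates the ID itself; it is used when a
 * REQ_FLUSH bio also carries data and the flush must precede the write.
 */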
static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                                                                int cmdid)
{
        struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->common.opcode = nvme_cmd_flush;
        cmnd->common.command_id = cmdid;
        cmnd->common.nsid = cpu_to_le32(ns->ns_id);

        if (++nvmeq->sq_tail == nvmeq->q_depth)
                nvmeq->sq_tail = 0;
        writel(nvmeq->sq_tail, nvmeq->q_db);

        return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
        int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
                                        sync_completion_id, IO_TIMEOUT);
        if (unlikely(cmdid < 0))
                return cmdid;

        return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                                                                struct bio *bio)
{
        struct nvme_command *cmnd;
        struct nvme_bio *nbio;
        enum dma_data_direction dma_dir;
        int cmdid, length, result = -ENOMEM;
        u16 control;
        u32 dsmgmt;
        int psegs = bio_phys_segments(ns->queue, bio);

        if ((bio->bi_rw & REQ_FLUSH) && psegs) {
                result = nvme_submit_flush_data(nvmeq, ns);
                if (result)
                        return result;
        }

        nbio = alloc_nbio(psegs, GFP_ATOMIC);
        if (!nbio)
                goto nomem;
        nbio->bio = bio;

        result = -EBUSY;
        cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
        if (unlikely(cmdid < 0))
                goto free_nbio;

        if ((bio->bi_rw & REQ_FLUSH) && !psegs)
                return nvme_submit_flush(nvmeq, ns, cmdid);

        control = 0;
        if (bio->bi_rw & REQ_FUA)
                control |= NVME_RW_FUA;
        if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
                control |= NVME_RW_LR;

        dsmgmt = 0;
        if (bio->bi_rw & REQ_RAHEAD)
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

        cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

        memset(cmnd, 0, sizeof(*cmnd));
        if (bio_data_dir(bio)) {
                cmnd->rw.opcode = nvme_cmd_write;
                dma_dir = DMA_TO_DEVICE;
        } else {
                cmnd->rw.opcode = nvme_cmd_read;
                dma_dir = DMA_FROM_DEVICE;
        }

        result = nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs);
        if (result < 0)
                goto free_nbio;
        length = result;

        cmnd->rw.command_id = cmdid;
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
        nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
                                                        &length, GFP_ATOMIC);
        cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
        cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

        bio->bi_sector += length >> 9;

        if (++nvmeq->sq_tail == nvmeq->q_depth)
                nvmeq->sq_tail = 0;
        writel(nvmeq->sq_tail, nvmeq->q_db);

        return 0;

 free_nbio:
        free_nbio(nvmeq, nbio);
 nomem:
        return result;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
        struct nvme_ns *ns = q->queuedata;
        struct nvme_queue *nvmeq = get_nvmeq(ns);
        int result = -EBUSY;

        spin_lock_irq(&nvmeq->q_lock);
        if (bio_list_empty(&nvmeq->sq_cong))
                result = nvme_submit_bio_queue(nvmeq, ns, bio);
        if (unlikely(result)) {
                if (bio_list_empty(&nvmeq->sq_cong))
                        add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
                bio_list_add(&nvmeq->sq_cong, bio);
        }

        spin_unlock_irq(&nvmeq->q_lock);
        put_nvmeq(nvmeq);

        return 0;
}

struct sync_cmd_info {
        struct task_struct *task;
        u32 result;
        int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
{
        struct sync_cmd_info *cmdinfo = ctx;
        if (unlikely((unsigned long)cmdinfo == CMD_CTX_CANCELLED))
                return;
        if ((unsigned long)cmdinfo == CMD_CTX_FLUSH)
                return;
        if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
                dev_warn(nvmeq->q_dmadev,
                                "completed id %d twice on queue %d\n",
                                cqe->command_id, le16_to_cpup(&cqe->sq_id));
                return;
        }
        if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
                dev_warn(nvmeq->q_dmadev,
                                "invalid id %d completed on queue %d\n",
                                cqe->command_id, le16_to_cpup(&cqe->sq_id));
                return;
        }
        cmdinfo->result = le32_to_cpup(&cqe->result);
        cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
        wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
                                                struct nvme_completion *);

static const completion_fn nvme_completions[4] = {
        [sync_completion_id] = sync_completion,
        [bio_completion_id]  = bio_completion,
};

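/*
 * Reap completion queue entries.  The phase bit in each entry tells us
 * whether it is new; the completion doorbell is only written once we stop
 * finding new entries.  Called with q_lock held.
 */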
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
        u16 head, phase;

        head = nvmeq->cq_head;
        phase = nvmeq->cq_phase;

        for (;;) {
                unsigned long data;
                void *ptr;
                unsigned char handler;
                struct nvme_completion cqe = nvmeq->cqes[head];
                if ((le16_to_cpu(cqe.status) & 1) != phase)
                        break;
                nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
                if (++head == nvmeq->q_depth) {
                        head = 0;
                        phase = !phase;
                }

                data = free_cmdid(nvmeq, cqe.command_id);
                handler = data & 3;
                ptr = (void *)(data & ~3UL);
                nvme_completions[handler](nvmeq, ptr, &cqe);
        }

        /* If the controller ignores the cq head doorbell and continuously
         * writes to the queue, it is theoretically possible to wrap around
         * the queue twice and mistakenly return IRQ_NONE.  Linux only
         * requires that 0.1% of your interrupts are handled, so this isn't
         * a big problem.
         */
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return IRQ_NONE;

        writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;

        return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
        irqreturn_t result;
        struct nvme_queue *nvmeq = data;
        spin_lock(&nvmeq->q_lock);
        result = nvme_process_cq(nvmeq);
        spin_unlock(&nvmeq->q_lock);
        return result;
}

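/*
 * Hard-irq handler used when threaded interrupts are enabled: peek at the
 * next completion entry and only wake the handler thread if it is new.
 */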
static irqreturn_t nvme_irq_check(int irq, void *data)
{
        struct nvme_queue *nvmeq = data;
        struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
        if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
                return IRQ_NONE;
        return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
        spin_lock_irq(&nvmeq->q_lock);
        cancel_cmdid(nvmeq, cmdid);
        spin_unlock_irq(&nvmeq->q_lock);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
                        struct nvme_command *cmd, u32 *result, unsigned timeout)
{
        int cmdid;
        struct sync_cmd_info cmdinfo;

        cmdinfo.task = current;
        cmdinfo.status = -EINTR;

        cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
                                                                timeout);
        if (cmdid < 0)
                return cmdid;
        cmd->common.command_id = cmdid;

        set_current_state(TASK_KILLABLE);
        nvme_submit_cmd(nvmeq, cmd);
        schedule();

        if (cmdinfo.status == -EINTR) {
                nvme_abort_command(nvmeq, cmdid);
                return -EINTR;
        }

        if (result)
                *result = cmdinfo.result;

        return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
                                                                u32 *result)
{
        return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
        int status;
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.delete_queue.opcode = opcode;
        c.delete_queue.qid = cpu_to_le16(id);

        status = nvme_submit_admin_cmd(dev, &c, NULL);
        if (status)
                return -EIO;
        return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
                                                struct nvme_queue *nvmeq)
{
        int status;
        struct nvme_command c;
        int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

        memset(&c, 0, sizeof(c));
        c.create_cq.opcode = nvme_admin_create_cq;
        c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
        c.create_cq.cqid = cpu_to_le16(qid);
        c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
        c.create_cq.cq_flags = cpu_to_le16(flags);
        c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

        status = nvme_submit_admin_cmd(dev, &c, NULL);
        if (status)
                return -EIO;
        return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
                                                struct nvme_queue *nvmeq)
{
        int status;
        struct nvme_command c;
        int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

        memset(&c, 0, sizeof(c));
        c.create_sq.opcode = nvme_admin_create_sq;
        c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
        c.create_sq.sqid = cpu_to_le16(qid);
        c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
        c.create_sq.sq_flags = cpu_to_le16(flags);
        c.create_sq.cqid = cpu_to_le16(qid);

        status = nvme_submit_admin_cmd(dev, &c, NULL);
        if (status)
                return -EIO;
        return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
        return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
                                                        dma_addr_t dma_addr)
{
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.identify.opcode = nvme_admin_identify;
        c.identify.nsid = cpu_to_le32(nsid);
        c.identify.prp1 = cpu_to_le64(dma_addr);
        c.identify.cns = cpu_to_le32(cns);

        return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
                        unsigned dword11, dma_addr_t dma_addr, u32 *result)
{
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_get_features;
        c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(fid);
        c.features.dword11 = cpu_to_le32(dword11);

        return nvme_submit_admin_cmd(dev, &c, result);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
        struct nvme_queue *nvmeq = dev->queues[qid];
        int vector = dev->entry[nvmeq->cq_vector].vector;

        irq_set_affinity_hint(vector, NULL);
        free_irq(vector, nvmeq);

        /* Don't tell the adapter to delete the admin queue */
        if (qid) {
                adapter_delete_sq(dev, qid);
                adapter_delete_cq(dev, qid);
        }

        dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
                                (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
        dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
                                        nvmeq->sq_cmds, nvmeq->sq_dma_addr);
        kfree(nvmeq);
}

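/*
 * Allocate a queue structure along with DMA-coherent memory for the
 * submission and completion rings.  The extra space at the end of the
 * structure holds the command ID bitmap followed by the nvme_cmd_info array.
 */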
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                                                        int depth, int vector)
{
        struct device *dmadev = &dev->pci_dev->dev;
        unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
        struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
        if (!nvmeq)
                return NULL;

        nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
                                        &nvmeq->cq_dma_addr, GFP_KERNEL);
        if (!nvmeq->cqes)
                goto free_nvmeq;
        memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

        nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
                                        &nvmeq->sq_dma_addr, GFP_KERNEL);
        if (!nvmeq->sq_cmds)
                goto free_cqdma;

        nvmeq->q_dmadev = dmadev;
        nvmeq->dev = dev;
        spin_lock_init(&nvmeq->q_lock);
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        init_waitqueue_head(&nvmeq->sq_full);
        init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
        bio_list_init(&nvmeq->sq_cong);
        nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
        nvmeq->q_depth = depth;
        nvmeq->cq_vector = vector;

        return nvmeq;

 free_cqdma:
        dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
                                                        nvmeq->cq_dma_addr);
 free_nvmeq:
        kfree(nvmeq);
        return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                                        const char *name)
{
        if (use_threaded_interrupts)
                return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
                                        nvme_irq_check, nvme_irq,
                                        IRQF_DISABLED | IRQF_SHARED,
                                        name, nvmeq);
        return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
                                IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
                                        int qid, int cq_size, int vector)
{
        int result;
        struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

        if (!nvmeq)
                return ERR_PTR(-ENOMEM);

        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
                goto free_nvmeq;

        result = adapter_alloc_sq(dev, qid, nvmeq);
        if (result < 0)
                goto release_cq;

        result = queue_request_irq(dev, nvmeq, "nvme");
        if (result < 0)
                goto release_sq;

        return nvmeq;

 release_sq:
        adapter_delete_sq(dev, qid);
 release_cq:
        adapter_delete_cq(dev, qid);
 free_nvmeq:
        dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
                                (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
        dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
                                        nvmeq->sq_cmds, nvmeq->sq_dma_addr);
        kfree(nvmeq);
        return ERR_PTR(result);
}

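/*
 * Set up the admin queue: program AQA/ASQ/ACQ, enable the controller and
 * wait for CSTS.RDY, noting the doorbell stride from the CAP register.
 */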
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
        int result;
        u32 aqa;
        u64 cap;
        unsigned long timeout;
        struct nvme_queue *nvmeq;

        dev->dbs = ((void __iomem *)dev->bar) + 4096;

        nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
        if (!nvmeq)
                return -ENOMEM;

        aqa = nvmeq->q_depth - 1;
        aqa |= aqa << 16;

        dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
        dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
        dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
        dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

        writel(0, &dev->bar->cc);
        writel(aqa, &dev->bar->aqa);
        writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
        writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
        writel(dev->ctrl_config, &dev->bar->cc);

        cap = readq(&dev->bar->cap);
        timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
        dev->db_stride = NVME_CAP_STRIDE(cap);

        while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
                msleep(100);
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
                        dev_err(&dev->pci_dev->dev,
                                "Device not ready; aborting initialisation\n");
                        return -ENODEV;
                }
        }

        result = queue_request_irq(dev, nvmeq, "nvme admin");
        dev->queues[0] = nvmeq;
        return result;
}

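/*
 * Pin the pages of a user buffer with get_user_pages_fast() and build a
 * DMA-mapped scatterlist covering them.  Returns the number of scatterlist
 * entries, or a negative errno.
 */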
static int nvme_map_user_pages(struct nvme_dev *dev, int write,
                                unsigned long addr, unsigned length,
                                struct scatterlist **sgp)
{
        int i, err, count, nents, offset;
        struct scatterlist *sg;
        struct page **pages;

        if (addr & 3)
                return -EINVAL;
        if (!length)
                return -EINVAL;

        offset = offset_in_page(addr);
        count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
        pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

        err = get_user_pages_fast(addr, count, 1, pages);
        if (err < count) {
                count = err;
                err = -EFAULT;
                goto put_pages;
        }

        sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
        sg_init_table(sg, count);
        for (i = 0; i < count; i++) {
                sg_set_page(&sg[i], pages[i],
                                min_t(int, length, PAGE_SIZE - offset), offset);
                length -= (PAGE_SIZE - offset);
                offset = 0;
        }

        err = -ENOMEM;
        nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
                                write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!nents)
                goto put_pages;

        kfree(pages);
        *sgp = sg;
        return nents;

 put_pages:
        for (i = 0; i < count; i++)
                put_page(pages[i]);
        kfree(pages);
        return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
                        unsigned long addr, int length, struct scatterlist *sg)
{
        int i, count;

        count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
        dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);

        for (i = 0; i < count; i++)
                put_page(sg_page(&sg[i]));
}

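/*
 * Handle NVME_IOCTL_SUBMIT_IO: pin the user buffer, build the PRP list and
 * issue the read/write/compare command synchronously on this CPU's queue.
 */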
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
        struct nvme_dev *dev = ns->dev;
        struct nvme_queue *nvmeq;
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length;
        int nents, status;
        struct scatterlist *sg;
        struct nvme_prps *prps;

        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
        length = (io.nblocks + 1) << ns->lba_shift;

        switch (io.opcode) {
        case nvme_cmd_write:
        case nvme_cmd_read:
        case nvme_cmd_compare:
                nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr,
                                                                length, &sg);
                break;
        default:
                return -EINVAL;
        }

        if (nents < 0)
                return nents;

        memset(&c, 0, sizeof(c));
        c.rw.opcode = io.opcode;
        c.rw.flags = io.flags;
        c.rw.nsid = cpu_to_le32(ns->ns_id);
        c.rw.slba = cpu_to_le64(io.slba);
        c.rw.length = cpu_to_le16(io.nblocks);
        c.rw.control = cpu_to_le16(io.control);
        c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
        c.rw.reftag = io.reftag;
        c.rw.apptag = io.apptag;
        c.rw.appmask = io.appmask;
        /* XXX: metadata */
        prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);

        nvmeq = get_nvmeq(ns);
        /*
         * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
         * disabled.  We may be preempted at any point, and be rescheduled
         * to a different CPU.  That will cause cacheline bouncing, but no
         * additional races since q_lock already protects against other CPUs.
         */
        put_nvmeq(nvmeq);
        if (length != (io.nblocks + 1) << ns->lba_shift)
                status = -ENOMEM;
        else
                status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);

        nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg);
        nvme_free_prps(dev, prps);
        return status;
}

static int nvme_user_admin_cmd(struct nvme_ns *ns,
                                        struct nvme_admin_cmd __user *ucmd)
{
        struct nvme_dev *dev = ns->dev;
        struct nvme_admin_cmd cmd;
        struct nvme_command c;
        int status, length, nents = 0;
        struct scatterlist *sg;
        struct nvme_prps *prps = NULL;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
        c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
        c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
        c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

        length = cmd.data_len;
        if (cmd.data_len) {
                nents = nvme_map_user_pages(dev, 1, cmd.addr, length, &sg);
                if (nents < 0)
                        return nents;
                prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
        }

        if (length != cmd.data_len)
                status = -ENOMEM;
        else
                status = nvme_submit_admin_cmd(dev, &c, NULL);
        if (cmd.data_len) {
                nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, sg);
                nvme_free_prps(dev, prps);
        }
        return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
                                                        unsigned long arg)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;

        switch (cmd) {
        case NVME_IOCTL_ID:
                return ns->ns_id;
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_admin_cmd(ns, (void __user *)arg);
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
        .compat_ioctl   = nvme_ioctl,
};

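/*
 * Cancel every outstanding command on the queue whose timeout has expired,
 * completing it with an ABORT_REQ status.  Called from the polling kthread
 * with the queue lock held.
 */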
static void nvme_timeout_ios(struct nvme_queue *nvmeq)
{
        int depth = nvmeq->q_depth - 1;
        struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
        unsigned long now = jiffies;
        int cmdid;

        for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
                unsigned long data;
                void *ptr;
                unsigned char handler;
                static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };

                if (!time_after(now, info[cmdid].timeout))
                        continue;
                dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
                data = cancel_cmdid(nvmeq, cmdid);
                handler = data & 3;
                ptr = (void *)(data & ~3UL);
                nvme_completions[handler](nvmeq, ptr, &cqe);
        }
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
        while (bio_list_peek(&nvmeq->sq_cong)) {
                struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
                struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
                if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
                        bio_list_add_head(&nvmeq->sq_cong, bio);
                        break;
                }
                if (bio_list_empty(&nvmeq->sq_cong))
                        remove_wait_queue(&nvmeq->sq_full,
                                                        &nvmeq->sq_cong_wait);
        }
}

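/*
 * One kernel thread polls every queue of every registered device roughly
 * once a second: it reaps stray completions, times out stuck commands and
 * resubmits bios that were deferred because the queue was congested.
 */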
1234static int nvme_kthread(void *data)
1235{
1236 struct nvme_dev *dev;
1237
1238 while (!kthread_should_stop()) {
1239 __set_current_state(TASK_RUNNING);
1240 spin_lock(&dev_list_lock);
1241 list_for_each_entry(dev, &dev_list, node) {
1242 int i;
1243 for (i = 0; i < dev->queue_count; i++) {
1244 struct nvme_queue *nvmeq = dev->queues[i];
Matthew Wilcox740216f2011-02-15 16:28:20 -05001245 if (!nvmeq)
1246 continue;
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001247 spin_lock_irq(&nvmeq->q_lock);
1248 if (nvme_process_cq(nvmeq))
1249 printk("process_cq did something\n");
Matthew Wilcox8de05532011-05-12 13:50:28 -04001250 nvme_timeout_ios(nvmeq);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001251 nvme_resubmit_bios(nvmeq);
1252 spin_unlock_irq(&nvmeq->q_lock);
1253 }
1254 }
1255 spin_unlock(&dev_list_lock);
1256 set_current_state(TASK_INTERRUPTIBLE);
1257 schedule_timeout(HZ);
1258 }
1259 return 0;
1260}
1261
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001262static DEFINE_IDA(nvme_index_ida);
1263
1264static int nvme_get_ns_idx(void)
1265{
1266 int index, error;
1267
1268 do {
1269 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
1270 return -1;
1271
1272 spin_lock(&dev_list_lock);
1273 error = ida_get_new(&nvme_index_ida, &index);
1274 spin_unlock(&dev_list_lock);
1275 } while (error == -EAGAIN);
1276
1277 if (error)
1278 index = -1;
1279 return index;
1280}
1281
1282static void nvme_put_ns_idx(int index)
1283{
1284 spin_lock(&dev_list_lock);
1285 ida_remove(&nvme_index_ida, index);
1286 spin_unlock(&dev_list_lock);
1287}
1288
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
                        struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
        struct nvme_ns *ns;
        struct gendisk *disk;
        int lbaf;

        if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
                return NULL;

        ns = kzalloc(sizeof(*ns), GFP_KERNEL);
        if (!ns)
                return NULL;
        ns->queue = blk_alloc_queue(GFP_KERNEL);
        if (!ns->queue)
                goto out_free_ns;
        ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
                                QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
        blk_queue_make_request(ns->queue, nvme_make_request);
        ns->dev = dev;
        ns->queue->queuedata = ns;

        disk = alloc_disk(NVME_MINORS);
        if (!disk)
                goto out_free_queue;
        ns->ns_id = nsid;
        ns->disk = disk;
        lbaf = id->flbas & 0xf;
        ns->lba_shift = id->lbaf[lbaf].ds;

        disk->major = nvme_major;
        disk->minors = NVME_MINORS;
        disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
        disk->fops = &nvme_fops;
        disk->private_data = ns;
        disk->queue = ns->queue;
        disk->driverfs_dev = &dev->pci_dev->dev;
        sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
        set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

        return ns;

 out_free_queue:
        blk_cleanup_queue(ns->queue);
 out_free_ns:
        kfree(ns);
        return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
        int index = ns->disk->first_minor / NVME_MINORS;
        put_disk(ns->disk);
        nvme_put_ns_idx(index);
        blk_cleanup_queue(ns->queue);
        kfree(ns);
}

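/*
 * Negotiate the number of I/O queues with the controller.  Dword 11 of the
 * Number of Queues feature carries the (zero-based) submission and
 * completion queue counts in its low and high 16 bits; the reply is
 * decoded the same way and the smaller of the two grants is returned.
 */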
static int set_queue_count(struct nvme_dev *dev, int count)
{
        int status;
        u32 result;
        u32 q_count = (count - 1) | ((count - 1) << 16);

        status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
                                                                &result);
        if (status)
                return -EIO;
        return min(result & 0xffff, result >> 16) + 1;
}

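/*
 * Create one I/O queue pair per online CPU, capped by what the controller
 * and the MSI-X allocation actually grant.  If the doorbell stride pushes
 * the doorbell registers beyond the initial 8K mapping, BAR 0 is remapped
 * large enough to cover them.  Each vector gets an affinity hint for one
 * CPU, and the remaining possible CPUs are pointed at an existing queue so
 * every CPU has a valid queue to submit to.
 */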
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
        int result, cpu, i, nr_io_queues, db_bar_size;

        nr_io_queues = num_online_cpus();
        result = set_queue_count(dev, nr_io_queues);
        if (result < 0)
                return result;
        if (result < nr_io_queues)
                nr_io_queues = result;

        /* Deregister the admin queue's interrupt */
        free_irq(dev->entry[0].vector, dev->queues[0]);

        db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
        if (db_bar_size > 8192) {
                iounmap(dev->bar);
                dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
                                                                db_bar_size);
                dev->dbs = ((void __iomem *)dev->bar) + 4096;
                dev->queues[0]->q_db = dev->dbs;
        }

        for (i = 0; i < nr_io_queues; i++)
                dev->entry[i].entry = i;
        for (;;) {
                result = pci_enable_msix(dev->pci_dev, dev->entry,
                                                                nr_io_queues);
                if (result == 0) {
                        break;
                } else if (result > 0) {
                        nr_io_queues = result;
                        continue;
                } else {
                        nr_io_queues = 1;
                        break;
                }
        }

        result = queue_request_irq(dev, dev->queues[0], "nvme admin");
        /* XXX: handle failure here */

        cpu = cpumask_first(cpu_online_mask);
        for (i = 0; i < nr_io_queues; i++) {
                irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
                cpu = cpumask_next(cpu, cpu_online_mask);
        }

        for (i = 0; i < nr_io_queues; i++) {
                dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
                                                        NVME_Q_DEPTH, i);
                if (IS_ERR(dev->queues[i + 1]))
                        return PTR_ERR(dev->queues[i + 1]);
                dev->queue_count++;
        }

        for (; i < num_possible_cpus(); i++) {
                int target = i % rounddown_pow_of_two(dev->queue_count - 1);
                dev->queues[i + 1] = dev->queues[target + 1];
        }

        return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
        int i;

        for (i = dev->queue_count - 1; i >= 0; i--)
                nvme_free_queue(dev, i);
}

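/*
 * Bring up the I/O queues, read the controller identify data (serial,
 * model, firmware revision, namespace count), then identify each namespace,
 * fetch its LBA range type and register a gendisk for every namespace with
 * a non-zero capacity.  The 8K DMA buffer holds the identify data in its
 * first page and the LBA range types in its second.
 */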
static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
        int res, nn, i;
        struct nvme_ns *ns, *next;
        struct nvme_id_ctrl *ctrl;
        struct nvme_id_ns *id_ns;
        void *mem;
        dma_addr_t dma_addr;

        res = nvme_setup_io_queues(dev);
        if (res)
                return res;

        mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
                                                                GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        res = nvme_identify(dev, 0, 1, dma_addr);
        if (res) {
                res = -EIO;
                goto out_free;
        }

        ctrl = mem;
        nn = le32_to_cpup(&ctrl->nn);
        memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
        memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
        memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

        id_ns = mem;
        for (i = 1; i <= nn; i++) {
                res = nvme_identify(dev, i, 0, dma_addr);
                if (res)
                        continue;

                if (id_ns->ncap == 0)
                        continue;

                res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
                                                dma_addr + 4096, NULL);
                if (res)
                        continue;

                ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
                if (ns)
                        list_add_tail(&ns->list, &dev->namespaces);
        }
        list_for_each_entry(ns, &dev->namespaces, list)
                add_disk(ns->disk);

        goto out;

 out_free:
        list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
                list_del(&ns->list);
                nvme_ns_free(ns);
        }

 out:
        dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
        return res;
}

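/*
 * Tear down a device: unhook it from the polling thread's list, delete and
 * free every namespace's gendisk, then free the queues.
 */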
static int nvme_dev_remove(struct nvme_dev *dev)
{
        struct nvme_ns *ns, *next;

        spin_lock(&dev_list_lock);
        list_del(&dev->node);
        spin_unlock(&dev_list_lock);

        /* TODO: wait all I/O finished or cancel them */

        list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
                list_del(&ns->list);
                del_gendisk(ns->disk);
                nvme_ns_free(ns);
        }

        nvme_free_queues(dev);

        return 0;
}

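/*
 * Two DMA pools back the PRP lists: a page-sized pool for large transfers
 * and a 256-byte pool so that small transfers do not waste a whole page.
 */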
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
        struct device *dmadev = &dev->pci_dev->dev;
        dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
                                                PAGE_SIZE, PAGE_SIZE, 0);
        if (!dev->prp_page_pool)
                return -ENOMEM;

        /* Optimisation for I/Os between 4k and 128k */
        dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
                                                256, 256, 0);
        if (!dev->prp_small_pool) {
                dma_pool_destroy(dev->prp_page_pool);
                return -ENOMEM;
        }
        return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
        dma_pool_destroy(dev->prp_page_pool);
        dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
        static int instance;
        dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

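/*
 * PCI probe: enable the function, claim its memory BARs, set 64-bit DMA
 * masks, map the register BAR, create the admin queue, register the device
 * with the polling thread and finally enumerate its namespaces.  The error
 * labels unwind these steps in reverse order.
 */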
static int __devinit nvme_probe(struct pci_dev *pdev,
                                                const struct pci_device_id *id)
{
        int bars, result = -ENOMEM;
        struct nvme_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
                                                                GFP_KERNEL);
        if (!dev->entry)
                goto free;
        dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
                                                                GFP_KERNEL);
        if (!dev->queues)
                goto free;

        if (pci_enable_device_mem(pdev))
                goto free;
        pci_set_master(pdev);
        bars = pci_select_bars(pdev, IORESOURCE_MEM);
        if (pci_request_selected_regions(pdev, bars, "nvme"))
                goto disable;

        INIT_LIST_HEAD(&dev->namespaces);
        dev->pci_dev = pdev;
        pci_set_drvdata(pdev, dev);
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        nvme_set_instance(dev);
        dev->entry[0].vector = pdev->irq;

        result = nvme_setup_prp_pools(dev);
        if (result)
                goto disable_msix;

        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
        if (!dev->bar) {
                result = -ENOMEM;
                goto disable_msix;
        }

        result = nvme_configure_admin_queue(dev);
        if (result)
                goto unmap;
        dev->queue_count++;

        spin_lock(&dev_list_lock);
        list_add(&dev->node, &dev_list);
        spin_unlock(&dev_list_lock);

        result = nvme_dev_add(dev);
        if (result)
                goto delete;

        return 0;

 delete:
        spin_lock(&dev_list_lock);
        list_del(&dev->node);
        spin_unlock(&dev_list_lock);

        nvme_free_queues(dev);
 unmap:
        iounmap(dev->bar);
 disable_msix:
        pci_disable_msix(pdev);
        nvme_release_instance(dev);
        nvme_release_prp_pools(dev);
 disable:
        pci_disable_device(pdev);
        pci_release_regions(pdev);
 free:
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
        return result;
}

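/*
 * Undo nvme_probe(): remove the namespaces and queues, release MSI-X, the
 * BAR mapping, the instance number and the PRP pools, then disable the
 * PCI function.
 */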
static void __devexit nvme_remove(struct pci_dev *pdev)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);
        nvme_dev_remove(dev);
        pci_disable_msix(pdev);
        iounmap(dev->bar);
        nvme_release_instance(dev);
        nvme_release_prp_pools(dev);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
        .error_detected = nvme_error_detected,
        .mmio_enabled = nvme_dump_registers,
        .link_reset = nvme_link_reset,
        .slot_reset = nvme_slot_reset,
        .resume = nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS 0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
        .name = "nvme",
        .id_table = nvme_id_table,
        .probe = nvme_probe,
        .remove = __devexit_p(nvme_remove),
        .suspend = nvme_suspend,
        .resume = nvme_resume,
        .err_handler = &nvme_err_handler,
};

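/*
 * Module init: start the shared polling thread, register the nvme block
 * major and the PCI driver, unwinding on failure.
 */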
static int __init nvme_init(void)
{
        int result = -EBUSY;

        nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
        if (IS_ERR(nvme_thread))
                return PTR_ERR(nvme_thread);

        nvme_major = register_blkdev(nvme_major, "nvme");
        if (nvme_major <= 0)
                goto kill_kthread;

        result = pci_register_driver(&nvme_driver);
        if (result)
                goto unregister_blkdev;
        return 0;

 unregister_blkdev:
        unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
        kthread_stop(nvme_thread);
        return result;
}

static void __exit nvme_exit(void)
{
        pci_unregister_driver(&nvme_driver);
        unregister_blkdev(nvme_major, "nvme");
        kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.7");
module_init(nvme_init);
module_exit(nvme_exit);