/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

/*
 * Check that we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

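/*
 * Per-command state lives in the flexible array at the end of struct
 * nvme_queue: a bitmap of q_depth command IDs, followed immediately by
 * an array of nvme_cmd_info entries indexed by command ID.
 */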
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 * @timeout: Timeout for this command, relative to now, in jiffies
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  The handler and the context
 * pointer are stored in the nvme_cmd_info entry indexed by the
 * returned ID, so no alignment constraints are imposed on @ctx.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

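/*
 * The special ctx values below are offsets from POISON_POINTER_DELTA, so
 * they can never alias a real ctx pointer and are cheap to recognise in
 * a completion handler.
 */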
/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

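/*
 * I/O queues are assigned per CPU: queues[0] is the admin queue, so CPU
 * n submits on queues[n + 1].  get_cpu() also disables preemption until
 * the matching put_nvmeq().
 */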
struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

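/*
 * An iod is allocated as a single block: struct nvme_iod, whose tail is
 * the variable-length scatterlist, followed by the array of PRP-list
 * page pointers.  iod->offset records where that array begins, and
 * iod_list() returns it.
 */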
static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
		iod->start_time = jiffies;
	}

	return iod;
}

void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void nvme_start_io_acct(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();
	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (iod->nents)
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	nvme_end_io_acct(bio, iod->start_time);
	nvme_free_iod(dev, iod);
	if (status)
		bio_endio(bio, -EIO);
	else
		bio_endio(bio, 0);
}

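/*
 * PRP (Physical Region Page) lists are laid out as the spec requires:
 * each list page holds PAGE_SIZE / 8 64-bit entries, and once a page
 * fills up its last entry is repurposed as a pointer to the DMA address
 * of the next list page.
 */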
/* length is in bytes.  The gfp flags indicate whether we may sleep. */
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
			struct nvme_iod *iod, int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

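/*
 * A bio that can't be issued as one command (a virtual-address hole or
 * a stripe-boundary crossing) is cloned into two halves sharing a
 * refcounted nvme_bio_pair; the parent bio completes only when both
 * halves have ended.
 */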
struct nvme_bio_pair {
	struct bio b1, b2, *parent;
	struct bio_vec *bv1, *bv2;
	int err;
	atomic_t cnt;
};

static void nvme_bio_pair_endio(struct bio *bio, int err)
{
	struct nvme_bio_pair *bp = bio->bi_private;

	if (err)
		bp->err = err;

	if (atomic_dec_and_test(&bp->cnt)) {
		bio_endio(bp->parent, bp->err);
		if (bp->bv1)
			kfree(bp->bv1);
		if (bp->bv2)
			kfree(bp->bv2);
		kfree(bp);
	}
}

static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
							int len, int offset)
{
	struct nvme_bio_pair *bp;

	BUG_ON(len > bio->bi_size);
	BUG_ON(idx > bio->bi_vcnt);

	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
	if (!bp)
		return NULL;
	bp->err = 0;

	bp->b1 = *bio;
	bp->b2 = *bio;

	bp->b1.bi_size = len;
	bp->b2.bi_size -= len;
	bp->b1.bi_vcnt = idx;
	bp->b2.bi_idx = idx;
	bp->b2.bi_sector += len >> 9;

	if (offset) {
		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv1)
			goto split_fail_1;

		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv2)
			goto split_fail_2;

		memcpy(bp->bv1, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));
		memcpy(bp->bv2, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));

		bp->b1.bi_io_vec = bp->bv1;
		bp->b2.bi_io_vec = bp->bv2;
		bp->b2.bi_io_vec[idx].bv_offset += offset;
		bp->b2.bi_io_vec[idx].bv_len -= offset;
		bp->b1.bi_io_vec[idx].bv_len = offset;
		bp->b1.bi_vcnt++;
	} else
		bp->bv1 = bp->bv2 = NULL;

	bp->b1.bi_private = bp;
	bp->b2.bi_private = bp;

	bp->b1.bi_end_io = nvme_bio_pair_endio;
	bp->b2.bi_end_io = nvme_bio_pair_endio;

	bp->parent = bio;
	atomic_set(&bp->cnt, 2);

	return bp;

 split_fail_2:
	kfree(bp->bv1);
 split_fail_1:
	kfree(bp);
	return NULL;
}

static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
						int idx, int len, int offset)
{
	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
	if (!bp)
		return -ENOMEM;

	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, &bp->b1);
	bio_list_add(&nvmeq->sq_cong, &bp->b2);

	return 0;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

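/*
 * Build the scatterlist for a bio, merging physically contiguous bvecs.
 * A virtual-address hole, or crossing the device's stripe boundary,
 * forces a split via nvme_split_and_submit() instead.
 */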
static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, length = 0, nsegs = 0, split_len = bio->bi_size;

	if (nvmeq->dev->stripe_size)
		split_len = nvmeq->dev->stripe_size -
			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));

	sg_init_table(iod->sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				return nvme_split_and_submit(bio, nvmeq, i,
								length, 0);

			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}

		if (split_len - length < bvec->bv_len)
			return nvme_split_and_submit(bio, nvmeq, i, split_len,
							split_len - length);
		length += bvec->bv_len;
		bvprv = bvec;
	}
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
		return -ENOMEM;

	BUG_ON(length != bio->bi_size);
	return length;
}

/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct bio *bio, struct nvme_iod *iod, int cmdid)
{
	struct nvme_dsm_range *range;
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
							&iod->first_dma);
	if (!range)
		return -ENOMEM;

	iod_list(iod)[0] = (__le64 *)range;
	iod->npages = 0;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.command_id = cmdid;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	result = -ENOMEM;
	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if (bio->bi_rw & REQ_DISCARD) {
		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
		if (result)
			goto free_cmdid;
		return result;
	}
	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
	if (result <= 0)
		goto free_cmdid;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	nvme_start_io_acct(bio);
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_cmdid:
	free_cmdid(nvmeq, cmdid, NULL);
 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}

static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}

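/*
 * A completion entry is new when the phase bit of its status matches
 * nvmeq->cq_phase; the controller inverts the phase tag each time it
 * wraps around the circular completion queue.
 */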
static int nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return 0;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
	return 1;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

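/*
 * Synchronous commands ride the normal completion path: the submitter
 * sleeps in TASK_KILLABLE and sync_completion() wakes it after copying
 * the result and status out of the completion entry.
 */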
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
						u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule_timeout(timeout);

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

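/*
 * Identify helper: per the NVMe spec, cns == 1 asks for the 4KB
 * controller data structure and cns == 0 for the data structure of
 * namespace 'nsid'.
 */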
int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @nvmeq: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
			continue;
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_cancel_ios(nvmeq, false);
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		bio_endio(bio, -EIO);
	}
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	nvme_free_queue_mem(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
						sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
					    int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

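/*
 * CAP.TO is expressed in 500ms units, hence the (CAP.TO + 1) * HZ / 2
 * worst-case wait below for CSTS.RDY to reach the desired state.
 */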
static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
{
	unsigned long timeout;
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
	u32 cc = readl(&dev->bar->cc);

	if (cc & NVME_CC_ENABLE)
		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
	return nvme_wait_ready(dev, cap, false);
}

static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
	return nvme_wait_ready(dev, cap, true);
}

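/*
 * The admin queue is created with a fixed depth of 64 entries; AQA takes
 * the zero-based queue size in both its submission- and completion-queue
 * fields, which is why the same value is replicated into both halves.
 */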
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = readq(&dev->bar->cap);
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;
	dev->db_stride = NVME_CAP_STRIDE(cap);

	result = nvme_disable_ctrl(dev, cap);
	if (result < 0)
		return result;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	result = nvme_enable_ctrl(dev, cap);
	if (result)
		goto free_q;

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	if (result)
		goto free_q;

	dev->queues[0] = nvmeq;
	return result;

 free_q:
	nvme_free_queue_mem(nvmeq);
	return result;
}

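/*
 * Pin a user buffer with get_user_pages_fast() and describe it with a
 * scatterlist.  The caller undoes this with nvme_unmap_user_pages(),
 * which drops the page references, and then frees the iod.
 */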
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length || length > INT_MAX - PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(unsigned, length, PAGE_SIZE - offset),
				offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

Vishal Verma5d0f6132013-03-04 18:40:58 -07001291void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001292 struct nvme_iod *iod)
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001293{
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001294 int i;
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001295
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001296 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
1297 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001298
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001299 for (i = 0; i < iod->nents; i++)
1300 put_page(sg_page(&iod->sg[i]));
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001301}
1302
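/*
 * Illustrative sketch (an assumption, not part of the driver): how the two
 * helpers above combine with nvme_setup_prps() and nvme_free_iod() in the
 * ioctl paths that follow.  The function name and parameters are made up
 * for the example; error handling is the minimum needed to be correct.
 */
static int __maybe_unused nvme_user_xfer_sketch(struct nvme_dev *dev,
		struct nvme_command *c, unsigned long addr, unsigned length,
		int write)
{
	struct nvme_iod *iod;
	struct nvme_queue *nvmeq;
	int status;

	iod = nvme_map_user_pages(dev, write, addr, length);	/* pin + map */
	if (IS_ERR(iod))
		return PTR_ERR(iod);
	if (nvme_setup_prps(dev, &c->common, iod, length, GFP_KERNEL) != length) {
		status = -ENOMEM;	/* couldn't build a full PRP list */
	} else {
		nvmeq = get_nvmeq(dev);	/* see the preemption note below */
		put_nvmeq(nvmeq);
		status = nvme_submit_sync_cmd(nvmeq, c, NULL, NVME_IO_TIMEOUT);
	}
	nvme_unmap_user_pages(dev, write, iod);	/* undo get_user_pages_fast */
	nvme_free_iod(dev, iod);	/* frees any PRP list pages too */
	return status;
}
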
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001303static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1304{
1305 struct nvme_dev *dev = ns->dev;
1306 struct nvme_queue *nvmeq;
1307 struct nvme_user_io io;
1308 struct nvme_command c;
Keith Buschf410c682013-04-23 17:23:59 -06001309 unsigned length, meta_len;
1310 int status, i;
1311 struct nvme_iod *iod, *meta_iod = NULL;
1312 dma_addr_t meta_dma_addr;
1313 void *meta, *uninitialized_var(meta_mem);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001314
1315 if (copy_from_user(&io, uio, sizeof(io)))
1316 return -EFAULT;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001317 length = (io.nblocks + 1) << ns->lba_shift;
Keith Buschf410c682013-04-23 17:23:59 -06001318 meta_len = (io.nblocks + 1) * ns->ms;
1319
1320 if (meta_len && ((io.metadata & 3) || !io.metadata))
1321 return -EINVAL;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001322
1323 switch (io.opcode) {
1324 case nvme_cmd_write:
1325 case nvme_cmd_read:
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001326 case nvme_cmd_compare:
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001327 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
Matthew Wilcox64132142011-08-09 12:56:37 -04001328 break;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001329 default:
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001330 return -EINVAL;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001331 }
1332
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001333 if (IS_ERR(iod))
1334 return PTR_ERR(iod);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001335
1336 memset(&c, 0, sizeof(c));
1337 c.rw.opcode = io.opcode;
1338 c.rw.flags = io.flags;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001339 c.rw.nsid = cpu_to_le32(ns->ns_id);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001340 c.rw.slba = cpu_to_le64(io.slba);
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001341 c.rw.length = cpu_to_le16(io.nblocks);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001342 c.rw.control = cpu_to_le16(io.control);
Matthew Wilcox1c9b5262013-04-16 15:21:06 -04001343 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1344 c.rw.reftag = cpu_to_le32(io.reftag);
1345 c.rw.apptag = cpu_to_le16(io.apptag);
1346 c.rw.appmask = cpu_to_le16(io.appmask);
Keith Buschf410c682013-04-23 17:23:59 -06001347
1348 if (meta_len) {
 1349		meta_iod = nvme_map_user_pages(dev, io.opcode & 1,
							io.metadata, meta_len);
1350 if (IS_ERR(meta_iod)) {
1351 status = PTR_ERR(meta_iod);
1352 meta_iod = NULL;
1353 goto unmap;
1354 }
1355
1356 meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
1357 &meta_dma_addr, GFP_KERNEL);
1358 if (!meta_mem) {
1359 status = -ENOMEM;
1360 goto unmap;
1361 }
1362
1363 if (io.opcode & 1) {
1364 int meta_offset = 0;
1365
1366 for (i = 0; i < meta_iod->nents; i++) {
1367 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1368 meta_iod->sg[i].offset;
1369 memcpy(meta_mem + meta_offset, meta,
1370 meta_iod->sg[i].length);
1371 kunmap_atomic(meta);
1372 meta_offset += meta_iod->sg[i].length;
1373 }
1374 }
1375
1376 c.rw.metadata = cpu_to_le64(meta_dma_addr);
1377 }
1378
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001379 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
Shane Michael Matthewse025344c2011-02-10 08:51:24 -05001380
Matthew Wilcox040a93b2011-12-20 11:04:12 -05001381 nvmeq = get_nvmeq(dev);
Matthew Wilcoxfa922822011-03-16 16:29:00 -04001382 /*
1383 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
Matthew Wilcoxb1ad37e2011-02-04 16:14:30 -05001384 * disabled. We may be preempted at any point, and be rescheduled
1385 * to a different CPU. That will cause cacheline bouncing, but no
1386 * additional races since q_lock already protects against other CPUs.
1387 */
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001388 put_nvmeq(nvmeq);
Matthew Wilcoxb77954c2011-05-12 13:51:41 -04001389 if (length != (io.nblocks + 1) << ns->lba_shift)
1390 status = -ENOMEM;
1391 else
Matthew Wilcoxff976d72011-12-20 13:53:01 -05001392 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001393
Keith Buschf410c682013-04-23 17:23:59 -06001394 if (meta_len) {
1395 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
1396 int meta_offset = 0;
1397
1398 for (i = 0; i < meta_iod->nents; i++) {
1399 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1400 meta_iod->sg[i].offset;
1401 memcpy(meta, meta_mem + meta_offset,
1402 meta_iod->sg[i].length);
1403 kunmap_atomic(meta);
1404 meta_offset += meta_iod->sg[i].length;
1405 }
1406 }
1407
1408 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
1409 meta_dma_addr);
1410 }
1411
1412 unmap:
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001413 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001414 nvme_free_iod(dev, iod);
Keith Buschf410c682013-04-23 17:23:59 -06001415
1416 if (meta_iod) {
1417 nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
1418 nvme_free_iod(dev, meta_iod);
1419 }
1420
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001421 return status;
1422}
1423
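/*
 * Userspace view (a sketch, not part of this file; it would be built as a
 * separate program against <linux/nvme.h> of this era): exercising the
 * NVME_IOCTL_SUBMIT_IO path above.  The device path and the 512-byte LBA
 * format are assumptions for the example.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

int main(void)
{
	struct nvme_user_io io;
	void *buf;
	int fd = open("/dev/nvme0n1", O_RDONLY);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(&io, 0, sizeof(io));
	io.opcode = 0x02;		/* nvme_cmd_read */
	io.slba = 0;			/* starting LBA */
	io.nblocks = 7;			/* 0's based: 8 * 512 bytes = 4k */
	io.addr = (unsigned long)buf;	/* must pass the (addr & 3) check */
	if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0)
		perror("NVME_IOCTL_SUBMIT_IO");
	free(buf);
	return 0;
}
#endif
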
Keith Busch50af8ba2012-07-25 16:07:55 -06001424static int nvme_user_admin_cmd(struct nvme_dev *dev,
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001425 struct nvme_admin_cmd __user *ucmd)
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001426{
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001427 struct nvme_admin_cmd cmd;
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001428 struct nvme_command c;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001429 int status, length;
Keith Buschc7d36ab2012-07-27 11:53:28 -06001430 struct nvme_iod *uninitialized_var(iod);
Keith Busch94f370c2013-05-09 14:01:38 -06001431 unsigned timeout;
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001432
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001433 if (!capable(CAP_SYS_ADMIN))
1434 return -EACCES;
1435 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001436 return -EFAULT;
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001437
1438 memset(&c, 0, sizeof(c));
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001439 c.common.opcode = cmd.opcode;
1440 c.common.flags = cmd.flags;
1441 c.common.nsid = cpu_to_le32(cmd.nsid);
1442 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1443 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1444 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
1445 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
1446 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
1447 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
1448 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
1449 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
1450
1451 length = cmd.data_len;
1452 if (cmd.data_len) {
Matthew Wilcox49742182012-01-06 13:42:45 -07001453 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
1454 length);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001455 if (IS_ERR(iod))
1456 return PTR_ERR(iod);
1457 length = nvme_setup_prps(dev, &c.common, iod, length,
1458 GFP_KERNEL);
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001459 }
1460
Keith Busch94f370c2013-05-09 14:01:38 -06001461 timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
1462 ADMIN_TIMEOUT;
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001463 if (length != cmd.data_len)
Matthew Wilcoxb77954c2011-05-12 13:51:41 -04001464 status = -ENOMEM;
1465 else
Keith Busch94f370c2013-05-09 14:01:38 -06001466 status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
1467 timeout);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001468
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001469 if (cmd.data_len) {
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001470 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001471 nvme_free_iod(dev, iod);
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001472 }
Keith Buschf4f117f2012-09-21 10:49:05 -06001473
Chayan Biswascf90bc42013-05-22 22:34:49 +00001474 if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
Keith Buschf4f117f2012-09-21 10:49:05 -06001475 sizeof(cmd.result)))
1476 status = -EFAULT;
1477
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001478 return status;
1479}
1480
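/*
 * Userspace view (a sketch, separate program, needs CAP_SYS_ADMIN): issuing
 * Identify Controller through NVME_IOCTL_ADMIN_CMD above, via either the
 * block node or the /dev/nvme%d misc node registered further down.  The
 * paths and the byte-24 model-number offset follow the NVMe 1.x Identify
 * layout; treat them as assumptions for the example.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

int main(void)
{
	struct nvme_admin_cmd cmd;
	void *id;
	int fd = open("/dev/nvme0", O_RDWR);

	if (fd < 0 || posix_memalign(&id, 4096, 4096))
		return 1;
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;		/* nvme_admin_identify */
	cmd.addr = (unsigned long)id;
	cmd.data_len = 4096;
	cmd.cdw10 = 1;			/* CNS 1: controller structure */
	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
		perror("NVME_IOCTL_ADMIN_CMD");
	else
		printf("model: %.40s\n", (char *)id + 24);
	free(id);
	return 0;
}
#endif
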
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001481static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1482 unsigned long arg)
1483{
1484 struct nvme_ns *ns = bdev->bd_disk->private_data;
1485
1486 switch (cmd) {
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001487 case NVME_IOCTL_ID:
1488 return ns->ns_id;
1489 case NVME_IOCTL_ADMIN_CMD:
Keith Busch50af8ba2012-07-25 16:07:55 -06001490 return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001491 case NVME_IOCTL_SUBMIT_IO:
1492 return nvme_submit_io(ns, (void __user *)arg);
Vishal Verma5d0f6132013-03-04 18:40:58 -07001493 case SG_GET_VERSION_NUM:
1494 return nvme_sg_get_version_num((void __user *)arg);
1495 case SG_IO:
1496 return nvme_sg_io(ns, (void __user *)arg);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001497 default:
1498 return -ENOTTY;
1499 }
1500}
1501
1502static const struct block_device_operations nvme_fops = {
1503 .owner = THIS_MODULE,
1504 .ioctl = nvme_ioctl,
Matthew Wilcox49481682011-03-19 14:55:38 -04001505 .compat_ioctl = nvme_ioctl,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001506};
1507
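/*
 * Congestion handling: bios that nvme_submit_bio_queue() could not place on
 * a submission queue sit on nvmeq->sq_cong; nvme_kthread below calls this
 * roughly once a second to drain them in arrival order, stopping at the
 * first bio that still does not fit.
 */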
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001508static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1509{
1510 while (bio_list_peek(&nvmeq->sq_cong)) {
1511 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1512 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
Keith Busch427e9702013-04-09 11:59:32 -06001513
Matthew Wilcox3cb967c2011-03-16 16:45:49 -04001514 if (bio_list_empty(&nvmeq->sq_cong))
1515 remove_wait_queue(&nvmeq->sq_full,
1516 &nvmeq->sq_cong_wait);
Keith Busch427e9702013-04-09 11:59:32 -06001517 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1518 if (bio_list_empty(&nvmeq->sq_cong))
1519 add_wait_queue(&nvmeq->sq_full,
1520 &nvmeq->sq_cong_wait);
1521 bio_list_add_head(&nvmeq->sq_cong, bio);
1522 break;
1523 }
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001524 }
1525}
1526
1527static int nvme_kthread(void *data)
1528{
1529 struct nvme_dev *dev;
1530
1531 while (!kthread_should_stop()) {
Arjan van de Ven564a2322013-05-01 16:38:23 -04001532 set_current_state(TASK_INTERRUPTIBLE);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001533 spin_lock(&dev_list_lock);
1534 list_for_each_entry(dev, &dev_list, node) {
1535 int i;
1536 for (i = 0; i < dev->queue_count; i++) {
1537 struct nvme_queue *nvmeq = dev->queues[i];
Matthew Wilcox740216f2011-02-15 16:28:20 -05001538 if (!nvmeq)
1539 continue;
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001540 spin_lock_irq(&nvmeq->q_lock);
Matthew Wilcoxbc57a0f2013-06-24 11:56:42 -04001541 nvme_process_cq(nvmeq);
Matthew Wilcoxa09115b2012-08-07 15:56:23 -04001542 nvme_cancel_ios(nvmeq, true);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001543 nvme_resubmit_bios(nvmeq);
1544 spin_unlock_irq(&nvmeq->q_lock);
1545 }
1546 }
1547 spin_unlock(&dev_list_lock);
Arjan van de Venacb7aa02013-02-04 14:44:33 -08001548 schedule_timeout(round_jiffies_relative(HZ));
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001549 }
1550 return 0;
1551}
1552
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001553static DEFINE_IDA(nvme_index_ida);
1554
1555static int nvme_get_ns_idx(void)
1556{
1557 int index, error;
1558
1559 do {
1560 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
1561 return -1;
1562
1563 spin_lock(&dev_list_lock);
1564 error = ida_get_new(&nvme_index_ida, &index);
1565 spin_unlock(&dev_list_lock);
1566 } while (error == -EAGAIN);
1567
1568 if (error)
1569 index = -1;
1570 return index;
1571}
1572
1573static void nvme_put_ns_idx(int index)
1574{
1575 spin_lock(&dev_list_lock);
1576 ida_remove(&nvme_index_ida, index);
1577 spin_unlock(&dev_list_lock);
1578}
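/*
 * Aside: the same allocation could be written with ida_simple_get(), which
 * wraps the pre-get/retry loop and its own locking; note it returns -errno
 * rather than -1 on failure, so callers would need a small adjustment:
 *
 *	return ida_simple_get(&nvme_index_ida, 0, 0, GFP_KERNEL);
 */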
1579
Keith Busch0e5e4f02012-11-09 16:33:05 -07001580static void nvme_config_discard(struct nvme_ns *ns)
1581{
1582 u32 logical_block_size = queue_logical_block_size(ns->queue);
1583 ns->queue->limits.discard_zeroes_data = 0;
1584 ns->queue->limits.discard_alignment = logical_block_size;
1585 ns->queue->limits.discard_granularity = logical_block_size;
1586 ns->queue->limits.max_discard_sectors = 0xffffffff;
1587 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
1588}
1589
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001590static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001591 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1592{
1593 struct nvme_ns *ns;
1594 struct gendisk *disk;
1595 int lbaf;
1596
1597 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1598 return NULL;
1599
1600 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1601 if (!ns)
1602 return NULL;
1603 ns->queue = blk_alloc_queue(GFP_KERNEL);
1604 if (!ns->queue)
1605 goto out_free_ns;
Matthew Wilcox4eeb9212012-01-10 14:35:08 -07001606 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
1607 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1608 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001609 blk_queue_make_request(ns->queue, nvme_make_request);
1610 ns->dev = dev;
1611 ns->queue->queuedata = ns;
1612
1613 disk = alloc_disk(NVME_MINORS);
1614 if (!disk)
1615 goto out_free_queue;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001616 ns->ns_id = nsid;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001617 ns->disk = disk;
1618 lbaf = id->flbas & 0xf;
1619 ns->lba_shift = id->lbaf[lbaf].ds;
Keith Buschf410c682013-04-23 17:23:59 -06001620 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
Keith Busche9ef4632012-07-24 15:01:04 -06001621 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
Keith Busch8fc23e02012-07-26 11:29:57 -06001622 if (dev->max_hw_sectors)
1623 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001624
1625 disk->major = nvme_major;
1626 disk->minors = NVME_MINORS;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001627 disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001628 disk->fops = &nvme_fops;
1629 disk->private_data = ns;
1630 disk->queue = ns->queue;
Matthew Wilcox388f0372011-02-01 12:49:38 -05001631 disk->driverfs_dev = &dev->pci_dev->dev;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001632 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
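	/*
	 * set_capacity() wants 512-byte sectors.  Worked example with
	 * assumed values: a 4k LBA format has ds == 12, so lba_shift - 9
	 * == 3 and a namespace of 2^20 blocks advertises 2^23 sectors
	 * (4 GiB).
	 */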
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001633 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1634
Keith Busch0e5e4f02012-11-09 16:33:05 -07001635 if (dev->oncs & NVME_CTRL_ONCS_DSM)
1636 nvme_config_discard(ns);
1637
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001638 return ns;
1639
1640 out_free_queue:
1641 blk_cleanup_queue(ns->queue);
1642 out_free_ns:
1643 kfree(ns);
1644 return NULL;
1645}
1646
1647static void nvme_ns_free(struct nvme_ns *ns)
1648{
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001649 int index = ns->disk->first_minor / NVME_MINORS;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001650 put_disk(ns->disk);
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001651 nvme_put_ns_idx(index);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001652 blk_cleanup_queue(ns->queue);
1653 kfree(ns);
1654}
1655
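/*
 * Set Features "Number of Queues" takes 0's-based counts packed as
 * (NCQR << 16) | NSQR.  Worked example with assumed values: requesting
 * count == 4 sends q_count == 0x00030003; a controller granting 3
 * submission but only 2 completion queues returns 0x00010002, and
 * min(2, 1) + 1 == 2 queue pairs are usable.
 */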
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001656static int set_queue_count(struct nvme_dev *dev, int count)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001657{
1658 int status;
1659 u32 result;
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001660 u32 q_count = (count - 1) | ((count - 1) << 16);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001661
Matthew Wilcoxdf348132012-01-11 07:29:56 -07001662 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001663 &result);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001664 if (status)
1665 return -EIO;
1666 return min(result & 0xffff, result >> 16) + 1;
1667}
1668
Greg Kroah-Hartman8d85fce2012-12-21 15:13:49 -08001669static int nvme_setup_io_queues(struct nvme_dev *dev)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001670{
Ramachandra Rao Gajulafa08a392013-05-11 15:19:31 -07001671 struct pci_dev *pdev = dev->pci_dev;
Matthew Wilcox063a8092013-06-20 10:53:48 -04001672 int result, cpu, i, vecs, nr_io_queues, db_bar_size, q_depth;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001673
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001674 nr_io_queues = num_online_cpus();
1675 result = set_queue_count(dev, nr_io_queues);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001676 if (result < 0)
1677 return result;
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001678 if (result < nr_io_queues)
1679 nr_io_queues = result;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001680
Matthew Wilcox1b234842011-01-20 13:01:49 -05001681 /* Deregister the admin queue's interrupt */
1682 free_irq(dev->entry[0].vector, dev->queues[0]);
1683
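	/*
	 * Doorbells start after the 4k register page; each queue pair owns
	 * a 4-byte SQ tail and a 4-byte CQ head doorbell, spread out by the
	 * stride, hence 8 << db_stride bytes per pair.  Example with
	 * assumed values: 64 I/O queues at stride 0 need 4096 + 65 * 8 =
	 * 4616 bytes and fit in the initial 8k mapping; at stride 6 they
	 * need 4096 + 65 * 512 bytes and force the remap below.
	 */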
Matthew Wilcoxf1938f62011-10-20 17:00:41 -04001684 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1685 if (db_bar_size > 8192) {
1686 iounmap(dev->bar);
Ramachandra Rao Gajulafa08a392013-05-11 15:19:31 -07001687 dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
Matthew Wilcoxf1938f62011-10-20 17:00:41 -04001688 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1689 dev->queues[0]->q_db = dev->dbs;
1690 }
1691
Matthew Wilcox063a8092013-06-20 10:53:48 -04001692 vecs = nr_io_queues;
1693 for (i = 0; i < vecs; i++)
Matthew Wilcox1b234842011-01-20 13:01:49 -05001694 dev->entry[i].entry = i;
1695 for (;;) {
Matthew Wilcox063a8092013-06-20 10:53:48 -04001696 result = pci_enable_msix(pdev, dev->entry, vecs);
1697 if (result <= 0)
Matthew Wilcox1b234842011-01-20 13:01:49 -05001698 break;
Matthew Wilcox063a8092013-06-20 10:53:48 -04001699 vecs = result;
1700 }
1701
1702 if (result < 0) {
1703 vecs = nr_io_queues;
1704 if (vecs > 32)
1705 vecs = 32;
1706 for (;;) {
1707 result = pci_enable_msi_block(pdev, vecs);
1708 if (result == 0) {
1709 for (i = 0; i < vecs; i++)
1710 dev->entry[i].vector = i + pdev->irq;
1711 break;
1712 } else if (result < 0) {
1713 vecs = 1;
1714 break;
1715 }
1716 vecs = result;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001717 }
1718 }
1719
Matthew Wilcox063a8092013-06-20 10:53:48 -04001720 /*
1721 * Should investigate if there's a performance win from allocating
1722 * more queues than interrupt vectors; it might allow the submission
1723 * path to scale better, even if the receive path is limited by the
1724 * number of interrupts.
1725 */
1726 nr_io_queues = vecs;
Ramachandra Rao Gajulafa08a392013-05-11 15:19:31 -07001727
Matthew Wilcox1b234842011-01-20 13:01:49 -05001728 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1729 /* XXX: handle failure here */
1730
1731 cpu = cpumask_first(cpu_online_mask);
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001732 for (i = 0; i < nr_io_queues; i++) {
Matthew Wilcox1b234842011-01-20 13:01:49 -05001733 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1734 cpu = cpumask_next(cpu, cpu_online_mask);
1735 }
1736
Keith Buscha0cadb82012-07-27 13:57:23 -04001737 q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
1738 NVME_Q_DEPTH);
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001739 for (i = 0; i < nr_io_queues; i++) {
Keith Buscha0cadb82012-07-27 13:57:23 -04001740 dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
Matthew Wilcox6f0f5442011-05-11 13:30:59 -07001741 if (IS_ERR(dev->queues[i + 1]))
1742 return PTR_ERR(dev->queues[i + 1]);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001743 dev->queue_count++;
1744 }
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001745
Matthew Wilcox9ecdc942011-03-16 16:52:19 -04001746 for (; i < num_possible_cpus(); i++) {
1747 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1748 dev->queues[i + 1] = dev->queues[target + 1];
1749 }
1750
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001751 return 0;
1752}
1753
1754static void nvme_free_queues(struct nvme_dev *dev)
1755{
1756 int i;
1757
1758 for (i = dev->queue_count - 1; i >= 0; i--)
1759 nvme_free_queue(dev, i);
1760}
1761
Matthew Wilcox422ef0c2013-04-16 11:22:36 -04001762/*
1763 * Return: error value if an error occurred setting up the queues or calling
1764 * Identify Device. 0 if these succeeded, even if adding some of the
1765 * namespaces failed. At the moment, these failures are silent. TBD which
1766 * failures should be reported.
1767 */
Greg Kroah-Hartman8d85fce2012-12-21 15:13:49 -08001768static int nvme_dev_add(struct nvme_dev *dev)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001769{
1770 int res, nn, i;
Keith Buschcbb62182013-05-01 13:07:49 -06001771 struct nvme_ns *ns;
Matthew Wilcox51814232011-02-01 16:18:08 -05001772 struct nvme_id_ctrl *ctrl;
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001773 struct nvme_id_ns *id_ns;
1774 void *mem;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001775 dma_addr_t dma_addr;
Keith Busch159b67d2013-04-09 17:13:20 -06001776 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001777
1778 res = nvme_setup_io_queues(dev);
1779 if (res)
1780 return res;
1781
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001782 mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001783 GFP_KERNEL);
Keith Buscha9ef4342013-05-01 13:07:48 -06001784 if (!mem)
1785 return -ENOMEM;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001786
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001787 res = nvme_identify(dev, 0, 1, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001788 if (res) {
1789 res = -EIO;
Keith Buschcbb62182013-05-01 13:07:49 -06001790 goto out;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001791 }
1792
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001793 ctrl = mem;
Matthew Wilcox51814232011-02-01 16:18:08 -05001794 nn = le32_to_cpup(&ctrl->nn);
Keith Busch0e5e4f02012-11-09 16:33:05 -07001795 dev->oncs = le16_to_cpup(&ctrl->oncs);
Matthew Wilcox51814232011-02-01 16:18:08 -05001796 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1797 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1798 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
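	/*
	 * MDTS is in units of the minimum page size.  Worked example with
	 * assumed values: MPSMIN == 0 gives shift == 12, so mdts == 5 caps
	 * transfers at 1 << (5 + 12 - 9) == 256 sectors (128k); mdts == 0
	 * means no limit and max_hw_sectors is left unset.
	 */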
Keith Busch159b67d2013-04-09 17:13:20 -06001799 if (ctrl->mdts)
Keith Busch8fc23e02012-07-26 11:29:57 -06001800 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
Keith Busch159b67d2013-04-09 17:13:20 -06001801 if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
1802 (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
1803 dev->stripe_size = 1 << (ctrl->vs[3] + shift);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001804
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001805 id_ns = mem;
Matthew Wilcox2b2c1892011-10-07 13:10:13 -04001806 for (i = 1; i <= nn; i++) {
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001807 res = nvme_identify(dev, i, 0, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001808 if (res)
1809 continue;
1810
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001811 if (id_ns->ncap == 0)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001812 continue;
1813
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001814 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
Keith Busch08df1e02012-09-21 10:52:13 -06001815 dma_addr + 4096, NULL);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001816 if (res)
Keith Busch12209032013-01-31 14:40:38 -07001817 memset(mem + 4096, 0, 4096);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001818
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001819 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001820 if (ns)
1821 list_add_tail(&ns->list, &dev->namespaces);
1822 }
1823 list_for_each_entry(ns, &dev->namespaces, list)
1824 add_disk(ns->disk);
Matthew Wilcox422ef0c2013-04-16 11:22:36 -04001825 res = 0;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001826
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001827 out:
Matthew Wilcox684f5c22011-09-19 17:14:53 -04001828 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001829 return res;
1830}
1831
1832static int nvme_dev_remove(struct nvme_dev *dev)
1833{
1834 struct nvme_ns *ns, *next;
1835
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001836 spin_lock(&dev_list_lock);
1837 list_del(&dev->node);
1838 spin_unlock(&dev_list_lock);
1839
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001840 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1841 list_del(&ns->list);
1842 del_gendisk(ns->disk);
1843 nvme_ns_free(ns);
1844 }
1845
1846 nvme_free_queues(dev);
1847
1848 return 0;
1849}
1850
Matthew Wilcox091b6092011-02-10 09:56:01 -05001851static int nvme_setup_prp_pools(struct nvme_dev *dev)
1852{
1853 struct device *dmadev = &dev->pci_dev->dev;
1854 dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
1855 PAGE_SIZE, PAGE_SIZE, 0);
1856 if (!dev->prp_page_pool)
1857 return -ENOMEM;
1858
Matthew Wilcox99802a72011-02-10 10:30:34 -05001859 /* Optimisation for I/Os between 4k and 128k */
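	/*
	 * Sizing (assumes 4k pages): PRP entries are 8 bytes, so one
	 * 256-byte list holds 32 entries and describes up to 128k per
	 * command; larger transfers chain full pages from the pool above.
	 */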
1860 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
1861 256, 256, 0);
1862 if (!dev->prp_small_pool) {
1863 dma_pool_destroy(dev->prp_page_pool);
1864 return -ENOMEM;
1865 }
Matthew Wilcox091b6092011-02-10 09:56:01 -05001866 return 0;
1867}
1868
1869static void nvme_release_prp_pools(struct nvme_dev *dev)
1870{
1871 dma_pool_destroy(dev->prp_page_pool);
Matthew Wilcox99802a72011-02-10 10:30:34 -05001872 dma_pool_destroy(dev->prp_small_pool);
Matthew Wilcox091b6092011-02-10 09:56:01 -05001873}
1874
Quoc-Son Anhcd58ad72012-02-21 16:50:53 -07001875static DEFINE_IDA(nvme_instance_ida);
1876
1877static int nvme_set_instance(struct nvme_dev *dev)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001878{
Quoc-Son Anhcd58ad72012-02-21 16:50:53 -07001879 int instance, error;
1880
1881 do {
1882 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
1883 return -ENODEV;
1884
1885 spin_lock(&dev_list_lock);
1886 error = ida_get_new(&nvme_instance_ida, &instance);
1887 spin_unlock(&dev_list_lock);
1888 } while (error == -EAGAIN);
1889
1890 if (error)
1891 return -ENODEV;
1892
1893 dev->instance = instance;
1894 return 0;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001895}
1896
1897static void nvme_release_instance(struct nvme_dev *dev)
1898{
Quoc-Son Anhcd58ad72012-02-21 16:50:53 -07001899 spin_lock(&dev_list_lock);
1900 ida_remove(&nvme_instance_ida, dev->instance);
1901 spin_unlock(&dev_list_lock);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001902}
1903
Keith Busch5e82e952013-02-19 10:17:58 -07001904static void nvme_free_dev(struct kref *kref)
1905{
1906 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
1907 nvme_dev_remove(dev);
Ramachandra Rao Gajulafa08a392013-05-11 15:19:31 -07001908 if (dev->pci_dev->msi_enabled)
1909 pci_disable_msi(dev->pci_dev);
1910 else if (dev->pci_dev->msix_enabled)
1911 pci_disable_msix(dev->pci_dev);
Keith Busch5e82e952013-02-19 10:17:58 -07001912 iounmap(dev->bar);
1913 nvme_release_instance(dev);
1914 nvme_release_prp_pools(dev);
1915 pci_disable_device(dev->pci_dev);
1916 pci_release_regions(dev->pci_dev);
1917 kfree(dev->queues);
1918 kfree(dev->entry);
1919 kfree(dev);
1920}
1921
1922static int nvme_dev_open(struct inode *inode, struct file *f)
1923{
1924 struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
1925 miscdev);
1926 kref_get(&dev->kref);
1927 f->private_data = dev;
1928 return 0;
1929}
1930
1931static int nvme_dev_release(struct inode *inode, struct file *f)
1932{
1933 struct nvme_dev *dev = f->private_data;
1934 kref_put(&dev->kref, nvme_free_dev);
1935 return 0;
1936}
1937
1938static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1939{
1940 struct nvme_dev *dev = f->private_data;
1941 switch (cmd) {
1942 case NVME_IOCTL_ADMIN_CMD:
1943 return nvme_user_admin_cmd(dev, (void __user *)arg);
1944 default:
1945 return -ENOTTY;
1946 }
1947}
1948
1949static const struct file_operations nvme_dev_fops = {
1950 .owner = THIS_MODULE,
1951 .open = nvme_dev_open,
1952 .release = nvme_dev_release,
1953 .unlocked_ioctl = nvme_dev_ioctl,
1954 .compat_ioctl = nvme_dev_ioctl,
1955};
1956
Greg Kroah-Hartman8d85fce2012-12-21 15:13:49 -08001957static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001958{
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001959 int bars, result = -ENOMEM;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001960 struct nvme_dev *dev;
1961
1962 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1963 if (!dev)
1964 return -ENOMEM;
1965 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
1966 GFP_KERNEL);
1967 if (!dev->entry)
1968 goto free;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001969 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
1970 GFP_KERNEL);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001971 if (!dev->queues)
1972 goto free;
1973
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001974 if (pci_enable_device_mem(pdev))
1975 goto free;
Matthew Wilcoxf64d3362011-02-01 09:01:59 -05001976 pci_set_master(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001977 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1978 if (pci_request_selected_regions(pdev, bars, "nvme"))
1979 goto disable;
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001980
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001981 INIT_LIST_HEAD(&dev->namespaces);
1982 dev->pci_dev = pdev;
1983 pci_set_drvdata(pdev, dev);
Matthew Wilcoxcf9f1232013-05-28 16:46:46 -04001984
1985 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
1986 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1987 else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
1988 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1989 else
1990 goto disable;
1991
Quoc-Son Anhcd58ad72012-02-21 16:50:53 -07001992 result = nvme_set_instance(dev);
1993 if (result)
1994 goto disable;
1995
Matthew Wilcox53c95772011-01-20 13:42:34 -05001996 dev->entry[0].vector = pdev->irq;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001997
Matthew Wilcox091b6092011-02-10 09:56:01 -05001998 result = nvme_setup_prp_pools(dev);
1999 if (result)
2000 goto disable_msix;
2001
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002002 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
2003 if (!dev->bar) {
2004 result = -ENOMEM;
Matthew Wilcox574e8b92011-02-01 16:24:35 -05002005 goto disable_msix;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002006 }
2007
2008 result = nvme_configure_admin_queue(dev);
2009 if (result)
2010 goto unmap;
2011 dev->queue_count++;
2012
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05002013 spin_lock(&dev_list_lock);
2014 list_add(&dev->node, &dev_list);
2015 spin_unlock(&dev_list_lock);
2016
Matthew Wilcox740216f2011-02-15 16:28:20 -05002017 result = nvme_dev_add(dev);
2018 if (result)
2019 goto delete;
2020
Keith Busch5e82e952013-02-19 10:17:58 -07002021 scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
2022 dev->miscdev.minor = MISC_DYNAMIC_MINOR;
2023 dev->miscdev.parent = &pdev->dev;
2024 dev->miscdev.name = dev->name;
2025 dev->miscdev.fops = &nvme_dev_fops;
2026 result = misc_register(&dev->miscdev);
2027 if (result)
2028 goto remove;
2029
2030 kref_init(&dev->kref);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002031 return 0;
2032
Keith Busch5e82e952013-02-19 10:17:58 -07002033 remove:
2034 nvme_dev_remove(dev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002035 delete:
Matthew Wilcox740216f2011-02-15 16:28:20 -05002036 spin_lock(&dev_list_lock);
2037 list_del(&dev->node);
2038 spin_unlock(&dev_list_lock);
2039
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002040 nvme_free_queues(dev);
2041 unmap:
2042 iounmap(dev->bar);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05002043 disable_msix:
Ramachandra Rao Gajulafa08a392013-05-11 15:19:31 -07002044 if (dev->pci_dev->msi_enabled)
2045 pci_disable_msi(dev->pci_dev);
2046 else if (dev->pci_dev->msix_enabled)
2047 pci_disable_msix(dev->pci_dev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002048 nvme_release_instance(dev);
Matthew Wilcox091b6092011-02-10 09:56:01 -05002049 nvme_release_prp_pools(dev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05002050 disable:
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05002051 pci_disable_device(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05002052 pci_release_regions(pdev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002053 free:
2054 kfree(dev->queues);
2055 kfree(dev->entry);
2056 kfree(dev);
2057 return result;
2058}
2059
Greg Kroah-Hartman8d85fce2012-12-21 15:13:49 -08002060static void nvme_remove(struct pci_dev *pdev)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002061{
2062 struct nvme_dev *dev = pci_get_drvdata(pdev);
Keith Busch5e82e952013-02-19 10:17:58 -07002063 misc_deregister(&dev->miscdev);
2064 kref_put(&dev->kref, nvme_free_dev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002065}
2066
2067/* These functions are yet to be implemented */
2068#define nvme_error_detected NULL
2069#define nvme_dump_registers NULL
2070#define nvme_link_reset NULL
2071#define nvme_slot_reset NULL
2072#define nvme_error_resume NULL
2073#define nvme_suspend NULL
2074#define nvme_resume NULL
2075
Stephen Hemminger1d352032012-09-07 09:33:17 -07002076static const struct pci_error_handlers nvme_err_handler = {
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002077 .error_detected = nvme_error_detected,
2078 .mmio_enabled = nvme_dump_registers,
2079 .link_reset = nvme_link_reset,
2080 .slot_reset = nvme_slot_reset,
2081 .resume = nvme_error_resume,
2082};
2083
2084/* Move to pci_ids.h later */
2085#define PCI_CLASS_STORAGE_EXPRESS 0x010802
2086
2087static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
2088 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2089 { 0, }
2090};
2091MODULE_DEVICE_TABLE(pci, nvme_id_table);
2092
2093static struct pci_driver nvme_driver = {
2094 .name = "nvme",
2095 .id_table = nvme_id_table,
2096 .probe = nvme_probe,
Greg Kroah-Hartman8d85fce2012-12-21 15:13:49 -08002097 .remove = nvme_remove,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002098 .suspend = nvme_suspend,
2099 .resume = nvme_resume,
2100 .err_handler = &nvme_err_handler,
2101};
2102
2103static int __init nvme_init(void)
2104{
Matthew Wilcox0ac13142012-07-31 13:31:15 -04002105 int result;
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05002106
2107 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
2108 if (IS_ERR(nvme_thread))
2109 return PTR_ERR(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002110
Keith Busch5c42ea12012-07-25 16:05:18 -06002111 result = register_blkdev(nvme_major, "nvme");
2112 if (result < 0)
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05002113 goto kill_kthread;
Keith Busch5c42ea12012-07-25 16:05:18 -06002114 else if (result > 0)
Matthew Wilcox0ac13142012-07-31 13:31:15 -04002115 nvme_major = result;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002116
2117 result = pci_register_driver(&nvme_driver);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05002118 if (result)
2119 goto unregister_blkdev;
2120 return 0;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002121
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05002122 unregister_blkdev:
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002123 unregister_blkdev(nvme_major, "nvme");
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05002124 kill_kthread:
2125 kthread_stop(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002126 return result;
2127}
2128
2129static void __exit nvme_exit(void)
2130{
2131 pci_unregister_driver(&nvme_driver);
2132 unregister_blkdev(nvme_major, "nvme");
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05002133 kthread_stop(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002134}
2135
2136MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
2137MODULE_LICENSE("GPL");
Matthew Wilcox366e8212012-01-10 16:30:15 -05002138MODULE_VERSION("0.8");
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002139module_init(nvme_init);
2140module_exit(nvme_exit);