// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"
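
/*
 * Open the backing block device for a namespace and cache its size and
 * logical block size. -ENOTBLK is returned without logging, presumably so
 * the caller can fall back to a file-backed namespace instead.
 */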
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
	return 0;
}
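
/*
 * Release the block device reference taken by nvmet_bdev_ns_enable(),
 * if any.
 */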
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}
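
/*
 * Reverse-map a block layer status to the NVMe status code that will be
 * returned to the host, and record the error location and starting LBA
 * used for error reporting.
 */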
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping between block layer errors
	 * and NVMe status codes (see nvme_error_status()). For consistency,
	 * when we reverse map we use the most appropriate NVMe status code
	 * from the group of NVMe status codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
		/* fallthru */
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}
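
/*
 * Common bio completion handler: complete the request with the translated
 * NVMe status, and drop the bio unless it is the one embedded in the
 * request itself.
 */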
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}
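
/*
 * Read/Write handler: convert the command's SLBA to a 512-byte sector
 * offset, map the request's scatterlist onto one or more chained bios,
 * and submit them. Transfers that fit NVMET_MAX_INLINE_DATA_LEN reuse
 * the bio embedded in the request to avoid an allocation.
 */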
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	sector_t sector;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op_flags |= REQ_NOMERGE;

	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	submit_bio(bio);
}
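
/*
 * Flush handler: submit an empty preflush bio; completion is reported
 * asynchronously through nvmet_bio_done().
 */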
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}
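
/*
 * Synchronous flush variant: blocks until the flush completes and maps
 * any failure to an internal NVMe status code.
 */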
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}
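
/*
 * Queue a single DSM range for discard. __blkdev_issue_discard() chains
 * the new bios onto *bio, so consecutive ranges are accumulated and
 * submitted together by the caller. On failure, record the offending
 * SLBA for error reporting.
 */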
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);

	if (ret)
		req->error_slba = le64_to_cpu(range->slba);

	return blk_to_nvme_status(req, errno_to_blk_status(ret));
}
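
/*
 * Walk the DSM range list supplied in the command's data buffer and
 * discard each range, stopping at the first failure. Note that dsm.nr
 * is 0's based, hence the inclusive loop bound.
 */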
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}
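
/*
 * Dataset Management: only the Deallocate (AD) attribute is implemented;
 * the integral read/write hints are accepted but completed as no-ops.
 */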
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}
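
/*
 * Write Zeroes handler. The length field is a 0's based block count,
 * hence the +1 when converting to sectors. __blkdev_issue_zeroout() may
 * finish without producing a bio, in which case the request is completed
 * directly with the translated status.
 */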
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	status = blk_to_nvme_status(req, errno_to_blk_status(ret));
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}
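
/*
 * Parse an I/O command for a block-device backed namespace: set the
 * execute handler and expected data length, or reject unknown opcodes
 * as invalid.
 */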
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}