// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) of the target controller to 0,
 * to which nvme_enable_ctrl() adds 12, giving 2^12 = 4k as the page_shift
 * value. Use a shift of 12 when calculating the ZASL.
 */
#define NVMET_MPSMIN_SHIFT	12
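
/*
 * Example: with a max_zone_append_sectors queue limit of 0x10000 512-byte
 * sectors (32 MiB), the helper below computes ilog2(0x10000 >> 3) = 13,
 * i.e. ZASL = 13 -> 2^13 units of 4 KiB = 32 MiB.
 */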

static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
	 * with the minimum memory page size (i.e. 12) as unit.
	 */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}

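/*
 * blkdev_report_zones() callback: reject the backing device if any of its
 * zones is conventional, since ZNS does not define that zone type.
 */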
static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}

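/*
 * Check that the backing zoned block device can be exposed as a ZNS
 * namespace: record the subsystem-wide ZASL, and reject devices with a
 * smaller last zone or with conventional zones.
 */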
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	struct request_queue *q = ns->bdev->bd_disk->queue;
	u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such a smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
				  validate_conv_zones_cb, NULL);
	if (ret < 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

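/*
 * Identify with CNS 06h (I/O Command Set specific Identify Controller,
 * ZNS command set): report the subsystem ZASL, capped by the transport
 * MDTS when the transport provides one.
 */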
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

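/*
 * Identify with CNS 05h (I/O Command Set specific Identify Namespace,
 * ZNS command set): report the zone size in logical blocks (ZSZE) and
 * the open/active zone limits of the backing device.
 */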
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
	id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
	id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));

done:
	/* Do not overwrite an error status with the SGL copy result. */
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
	kfree(id_zns);
out:
	nvmet_req_complete(req, status);
}

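/*
 * Validate the SLBA, buffer size (NUMD), Zone Receive Action and Zone
 * Receive Action Specific fields of a Zone Management Receive command.
 */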
static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

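/*
 * State shared with nvmet_bdev_report_zone_cb(): the host buffer offset
 * for the next descriptor, the number of descriptors that fit in the
 * buffer, the number of matching zones seen so far, and the requested
 * zone state filter (ZRASF).
 */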
struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

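/*
 * blkdev_report_zones() callback: translate a blk_zone that matches the
 * requested ZRASF state filter into an nvme_zone_descriptor and copy it
 * out, counting all matching zones even when the buffer is full.
 */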
static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

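/* Number of zones from the zone containing the SLBA to the end of the device. */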
static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return blkdev_nr_zones(req->ns->bdev->bd_disk) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}

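/* Number of zone descriptors that fit in the buffer after the report header. */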
static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

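/*
 * Workqueue handler for Zone Management Receive: report zones starting at
 * the SLBA, then write the report header (the number of zones) at offset 0
 * of the host buffer.
 */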
static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave room for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				  nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When the partial bit is set, nr_zones must indicate the number of
	 * zone descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

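/*
 * Map a Zone Send Action to the corresponding block layer zone management
 * operation. REQ_OP_LAST is used as an "invalid action" sentinel.
 */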
static inline enum req_opf zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

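/*
 * blkdev_report_zones() callback for "select all" emulation: mark in the
 * bitmap the zones eligible for the requested action, following the zone
 * state machine (open: closed zones; close: implicitly or explicitly
 * opened zones; finish: opened and closed zones).
 */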
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

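/*
 * With the select-all bit set, open/close/finish apply only to zones for
 * which the state transition is valid. Emulate this by scanning the device
 * for eligible zones and issuing a chain of per-zone management bios.
 */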
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, q->node);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	}

	/* We scanned all the zones */
	ret = 0;

	while (sector < get_capacity(bdev->bd_disk)) {
		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, 0, GFP_KERNEL);
			bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
			bio->bi_iter.bi_sector = sector;
			bio_set_dev(bio, bdev);
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += blk_queue_zone_sectors(q);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

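/*
 * Handle a Zone Management Send with the select-all bit set: reset is
 * passed straight to blkdev_zone_mgmt() over the whole capacity, while
 * open/close/finish fall back to the per-zone emulation above.
 */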
static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

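/*
 * Workqueue handler for Zone Management Send: validate the action and the
 * SLBA (which must be zone aligned and within the device capacity), then
 * apply the zone operation.
 */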
static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when the select-all bit is set, the slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

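/*
 * Zone Append completion: on success, return the LBA at which the data
 * was written in the 64-bit command result (CQE dwords 0 and 1).
 */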
static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

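/*
 * Build and submit a REQ_OP_ZONE_APPEND bio for a Zone Append command:
 * the SLBA must be the start of a zone within the device capacity, and
 * the data pages are added with bio_add_zone_append_page() so the bio
 * stays within the device's zone append limit.
 */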
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
	}

	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

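/*
 * Dispatch the ZNS specific I/O commands; anything else falls through to
 * the regular bdev backend command parser.
 */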
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}