// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include "nvme.h"

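/*
 * Revalidate the zone layout with the block layer and, on success, propagate
 * the controller's zone append limit to the namespace's request queue.
 */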
int nvme_revalidate_zones(struct nvme_ns *ns)
{
	struct request_queue *q = ns->queue;
	int ret;

	ret = blk_revalidate_disk_zones(ns->disk, NULL);
	if (!ret)
		blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
	return ret;
}

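/*
 * Read the ZNS-specific Identify Controller data to determine the maximum
 * data size, in 512-byte sectors, that a single Zone Append command may
 * transfer.
 */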
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
	struct nvme_id_ctrl_zns *id;
	int status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CS_CTRL;
	c.identify.csi = NVME_CSI_ZNS;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (status) {
		kfree(id);
		return status;
	}

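	/*
	 * ZASL is a power of two in units of the minimum memory page size
	 * (4K is assumed here), so 2^(zasl + 3) gives the limit in 512-byte
	 * sectors. A value of zero means no separate append limit is
	 * reported and the MDTS-derived transfer limit applies instead.
	 */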
	if (id->zasl)
		ctrl->max_zone_append = 1 << (id->zasl + 3);
	else
		ctrl->max_zone_append = ctrl->max_hw_sectors;
	kfree(id);
	return 0;
}

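/*
 * Query the ZNS-specific Identify Namespace data for the LBA format @lbaf
 * and apply the resulting zone geometry and limits to the namespace's
 * request queue.
 */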
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	struct nvme_effects_log *log = ns->head->effects;
	struct request_queue *q = ns->queue;
	struct nvme_command c = { };
	struct nvme_id_ns_zns *id;
	int status;

	/* Driver requires zone append support */
	if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
			NVME_CMD_EFFECTS_CSUPP)) {
		if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags))
			dev_warn(ns->ctrl->device,
				"Zone Append supported for zoned namespace:%d. Remove read-only mode\n",
				ns->head->ns_id);
	} else {
		set_bit(NVME_NS_FORCE_RO, &ns->flags);
		dev_warn(ns->ctrl->device,
			"Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n",
			ns->head->ns_id);
	}

	/* Lazily query controller append limit for the first zoned namespace */
	if (!ns->ctrl->max_zone_append) {
		status = nvme_set_max_append(ns->ctrl);
		if (status)
			return status;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(ns->head->ns_id);
	c.identify.cns = NVME_ID_CNS_CS_NS;
	c.identify.csi = NVME_CSI_ZNS;

	status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
	if (status)
		goto free_data;

	/*
	 * We currently do not handle devices requiring any of the zoned
	 * operation characteristics.
	 */
	if (id->zoc) {
		dev_warn(ns->ctrl->device,
			"zone operations:%x not supported for namespace:%u\n",
			le16_to_cpu(id->zoc), ns->head->ns_id);
		status = -ENODEV;
		goto free_data;
	}

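	/*
	 * The block layer requires a power-of-two zone size so that zone
	 * boundary arithmetic can be done with shifts and masks.
	 */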
	ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
	if (!is_power_of_2(ns->zsze)) {
		dev_warn(ns->ctrl->device,
			"invalid zone size:%llu for namespace:%u\n",
			ns->zsze, ns->head->ns_id);
		status = -ENODEV;
		goto free_data;
	}

	blk_queue_set_zoned(ns->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
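	/*
	 * MOR and MAR are 0-based; a device reporting no limit (0xffffffff)
	 * wraps to 0 here, which the block layer also treats as "no limit".
	 */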
	blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
	blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
free_data:
	kfree(id);
	return status;
}

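/*
 * Allocate a buffer for a zone report, sized for the requested number of
 * zones but capped by the queue's transfer limits. On success, *buflen is
 * set to the size actually allocated.
 */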
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
					  unsigned int nr_zones, size_t *buflen)
{
	struct request_queue *q = ns->disk->queue;
	size_t bufsize;
	void *buf;

	const size_t min_bufsize = sizeof(struct nvme_zone_report) +
				   sizeof(struct nvme_zone_descriptor);

	nr_zones = min_t(unsigned int, nr_zones,
			 get_capacity(ns->disk) >> ilog2(ns->zsze));

	bufsize = sizeof(struct nvme_zone_report) +
		nr_zones * sizeof(struct nvme_zone_descriptor);
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

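	/*
	 * Large allocations can fail under memory pressure; halve the buffer
	 * until an allocation succeeds or the size drops below one report
	 * header plus one zone descriptor.
	 */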
	while (bufsize >= min_bufsize) {
		buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize >>= 1;
	}
	return NULL;
}

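/*
 * Translate one NVMe zone descriptor into a struct blk_zone and pass it to
 * the block layer's report_zones callback.
 */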
static int nvme_zone_parse_entry(struct nvme_ns *ns,
				 struct nvme_zone_descriptor *entry,
				 unsigned int idx, report_zones_cb cb,
				 void *data)
{
	struct blk_zone zone = { };

	if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
		dev_err(ns->ctrl->device, "invalid zone type %#x\n",
			entry->zt);
		return -EINVAL;
	}

	zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
	zone.cond = entry->zs >> 4;
	zone.len = ns->zsze;
	zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
	zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
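	/*
	 * ZNS leaves the write pointer undefined for a full zone, while the
	 * block layer expects it to point just past the last written sector;
	 * synthesize that value here.
	 */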
	if (zone.cond == BLK_ZONE_COND_FULL)
		zone.wp = zone.start + zone.len;
	else
		zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));

	return cb(&zone, idx, data);
}

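/*
 * Report up to @nr_zones zones starting at @sector by issuing Zone
 * Management Receive commands, invoking @cb for each parsed descriptor.
 * Returns the number of zones reported or a negative error code.
 */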
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
			 unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nvme_zone_report *report;
	struct nvme_command c = { };
	int ret, zone_idx = 0;
	unsigned int nz, i;
	size_t buflen;

	if (ns->head->ids.csi != NVME_CSI_ZNS)
		return -EINVAL;

	report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
	if (!report)
		return -ENOMEM;

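	/*
	 * Request a partial report so that the returned zone count reflects
	 * the descriptors that fit in this buffer rather than every zone
	 * from the start LBA onwards.
	 */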
	c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
	c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
	c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
	c.zmr.zra = NVME_ZRA_ZONE_REPORT;
	c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
	c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;

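	/* Round down to the zone start; valid because zsze is a power of two. */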
	sector &= ~(ns->zsze - 1);
	while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
		memset(report, 0, buflen);

		c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
		ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
		if (ret) {
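			/* Positive values are NVMe status codes; map to -EIO. */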
			if (ret > 0)
				ret = -EIO;
			goto out_free;
		}

		nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
		if (!nz)
			break;

		for (i = 0; i < nz && zone_idx < nr_zones; i++) {
			ret = nvme_zone_parse_entry(ns, &report->entries[i],
						    zone_idx, cb, data);
			if (ret)
				goto out_free;
			zone_idx++;
		}

		sector += ns->zsze * nz;
	}

	if (zone_idx > 0)
		ret = zone_idx;
	else
		ret = -EINVAL;
out_free:
	kvfree(report);
	return ret;
}

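/*
 * Build a Zone Management Send command for @req. The caller's command
 * structure may hold stale data, so clear it before filling in the fields.
 */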
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
		struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
	memset(c, 0, sizeof(*c));

	c->zms.opcode = nvme_cmd_zone_mgmt_send;
	c->zms.nsid = cpu_to_le32(ns->head->ns_id);
	c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	c->zms.zsa = action;

	if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
		c->zms.select_all = 1;

	return BLK_STS_OK;
}