/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
| 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 15 | #include <linux/blkdev.h> |
| 16 | #include "nvmet.h" |
| 17 | |
| 18 | static void nvmet_execute_prop_set(struct nvmet_req *req) |
| 19 | { |
| 20 | u16 status = 0; |
| 21 | |
| 22 | if (!(req->cmd->prop_set.attrib & 1)) { |
| 23 | u64 val = le64_to_cpu(req->cmd->prop_set.value); |
| 24 | |
| 25 | switch (le32_to_cpu(req->cmd->prop_set.offset)) { |
| 26 | case NVME_REG_CC: |
| 27 | nvmet_update_cc(req->sq->ctrl, val); |
| 28 | break; |
| 29 | default: |
| 30 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 31 | break; |
| 32 | } |
| 33 | } else { |
| 34 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 35 | } |
| 36 | |
| 37 | nvmet_req_complete(req, status); |
| 38 | } |
| 39 | |
| 40 | static void nvmet_execute_prop_get(struct nvmet_req *req) |
| 41 | { |
| 42 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 43 | u16 status = 0; |
| 44 | u64 val = 0; |
| 45 | |
| 46 | if (req->cmd->prop_get.attrib & 1) { |
| 47 | switch (le32_to_cpu(req->cmd->prop_get.offset)) { |
| 48 | case NVME_REG_CAP: |
| 49 | val = ctrl->cap; |
| 50 | break; |
| 51 | default: |
| 52 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 53 | break; |
| 54 | } |
| 55 | } else { |
| 56 | switch (le32_to_cpu(req->cmd->prop_get.offset)) { |
| 57 | case NVME_REG_VS: |
| 58 | val = ctrl->subsys->ver; |
| 59 | break; |
| 60 | case NVME_REG_CC: |
| 61 | val = ctrl->cc; |
| 62 | break; |
| 63 | case NVME_REG_CSTS: |
| 64 | val = ctrl->csts; |
| 65 | break; |
| 66 | default: |
| 67 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 68 | break; |
| 69 | } |
| 70 | } |
| 71 | |
Christoph Hellwig | d49187e | 2016-11-10 07:32:33 -0800 | [diff] [blame] | 72 | req->rsp->result.u64 = cpu_to_le64(val); |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 73 | nvmet_req_complete(req, status); |
| 74 | } |
| 75 | |
Parav Pandit | 64a0ca8 | 2017-02-27 23:21:33 -0600 | [diff] [blame^] | 76 | u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req) |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 77 | { |
| 78 | struct nvme_command *cmd = req->cmd; |
| 79 | |
| 80 | req->ns = NULL; |
| 81 | |
| 82 | switch (cmd->fabrics.fctype) { |
| 83 | case nvme_fabrics_type_property_set: |
| 84 | req->data_len = 0; |
| 85 | req->execute = nvmet_execute_prop_set; |
| 86 | break; |
| 87 | case nvme_fabrics_type_property_get: |
| 88 | req->data_len = 0; |
| 89 | req->execute = nvmet_execute_prop_get; |
| 90 | break; |
| 91 | default: |
| 92 | pr_err("received unknown capsule type 0x%x\n", |
| 93 | cmd->fabrics.fctype); |
| 94 | return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; |
| 95 | } |
| 96 | |
| 97 | return 0; |
| 98 | } |
| 99 | |
/*
 * Bind the queue pair carried by a connect request to @ctrl.
 *
 * Returns 0 on success, or NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR when the
 * submission queue has already been claimed by some controller.
 */
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;

	/*
	 * Atomically claim the SQ for this controller: only the thread that
	 * swings sq->ctrl from NULL wins; a non-NULL previous value means a
	 * concurrent (or duplicate) connect already installed this queue.
	 */
	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}

	/* Safe to initialize both queues now that the SQ is exclusively ours. */
	nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
	return 0;
}
| 117 | |
| 118 | static void nvmet_execute_admin_connect(struct nvmet_req *req) |
| 119 | { |
| 120 | struct nvmf_connect_command *c = &req->cmd->connect; |
| 121 | struct nvmf_connect_data *d; |
| 122 | struct nvmet_ctrl *ctrl = NULL; |
| 123 | u16 status = 0; |
| 124 | |
| 125 | d = kmap(sg_page(req->sg)) + req->sg->offset; |
| 126 | |
| 127 | /* zero out initial completion result, assign values as needed */ |
Christoph Hellwig | d49187e | 2016-11-10 07:32:33 -0800 | [diff] [blame] | 128 | req->rsp->result.u32 = 0; |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 129 | |
| 130 | if (c->recfmt != 0) { |
| 131 | pr_warn("invalid connect version (%d).\n", |
| 132 | le16_to_cpu(c->recfmt)); |
| 133 | status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; |
| 134 | goto out; |
| 135 | } |
| 136 | |
| 137 | if (unlikely(d->cntlid != cpu_to_le16(0xffff))) { |
| 138 | pr_warn("connect attempt for invalid controller ID %#x\n", |
| 139 | d->cntlid); |
| 140 | status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; |
Christoph Hellwig | d49187e | 2016-11-10 07:32:33 -0800 | [diff] [blame] | 141 | req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 142 | goto out; |
| 143 | } |
| 144 | |
| 145 | status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, |
| 146 | le32_to_cpu(c->kato), &ctrl); |
| 147 | if (status) |
| 148 | goto out; |
| 149 | |
| 150 | status = nvmet_install_queue(ctrl, req); |
| 151 | if (status) { |
| 152 | nvmet_ctrl_put(ctrl); |
| 153 | goto out; |
| 154 | } |
| 155 | |
Sagi Grimberg | 15fbad9 | 2016-11-14 14:24:21 +0200 | [diff] [blame] | 156 | pr_info("creating controller %d for subsystem %s for NQN %s.\n", |
| 157 | ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn); |
Christoph Hellwig | d49187e | 2016-11-10 07:32:33 -0800 | [diff] [blame] | 158 | req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid); |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 159 | |
| 160 | out: |
| 161 | kunmap(sg_page(req->sg)); |
| 162 | nvmet_req_complete(req, status); |
| 163 | } |
| 164 | |
| 165 | static void nvmet_execute_io_connect(struct nvmet_req *req) |
| 166 | { |
| 167 | struct nvmf_connect_command *c = &req->cmd->connect; |
| 168 | struct nvmf_connect_data *d; |
| 169 | struct nvmet_ctrl *ctrl = NULL; |
| 170 | u16 qid = le16_to_cpu(c->qid); |
| 171 | u16 status = 0; |
| 172 | |
| 173 | d = kmap(sg_page(req->sg)) + req->sg->offset; |
| 174 | |
| 175 | /* zero out initial completion result, assign values as needed */ |
Christoph Hellwig | d49187e | 2016-11-10 07:32:33 -0800 | [diff] [blame] | 176 | req->rsp->result.u32 = 0; |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 177 | |
| 178 | if (c->recfmt != 0) { |
| 179 | pr_warn("invalid connect version (%d).\n", |
| 180 | le16_to_cpu(c->recfmt)); |
| 181 | status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; |
| 182 | goto out; |
| 183 | } |
| 184 | |
| 185 | status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, |
| 186 | le16_to_cpu(d->cntlid), |
| 187 | req, &ctrl); |
| 188 | if (status) |
| 189 | goto out; |
| 190 | |
| 191 | if (unlikely(qid > ctrl->subsys->max_qid)) { |
| 192 | pr_warn("invalid queue id (%d)\n", qid); |
| 193 | status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; |
Christoph Hellwig | d49187e | 2016-11-10 07:32:33 -0800 | [diff] [blame] | 194 | req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid); |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 195 | goto out_ctrl_put; |
| 196 | } |
| 197 | |
| 198 | status = nvmet_install_queue(ctrl, req); |
| 199 | if (status) { |
| 200 | /* pass back cntlid that had the issue of installing queue */ |
Christoph Hellwig | d49187e | 2016-11-10 07:32:33 -0800 | [diff] [blame] | 201 | req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid); |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 202 | goto out_ctrl_put; |
| 203 | } |
| 204 | |
| 205 | pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid); |
| 206 | |
| 207 | out: |
| 208 | kunmap(sg_page(req->sg)); |
| 209 | nvmet_req_complete(req, status); |
| 210 | return; |
| 211 | |
| 212 | out_ctrl_put: |
| 213 | nvmet_ctrl_put(ctrl); |
| 214 | goto out; |
| 215 | } |
| 216 | |
Parav Pandit | 64a0ca8 | 2017-02-27 23:21:33 -0600 | [diff] [blame^] | 217 | u16 nvmet_parse_connect_cmd(struct nvmet_req *req) |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 218 | { |
| 219 | struct nvme_command *cmd = req->cmd; |
| 220 | |
| 221 | req->ns = NULL; |
| 222 | |
Max Gurtovoy | 2dbf581 | 2017-01-23 11:01:12 +0200 | [diff] [blame] | 223 | if (cmd->common.opcode != nvme_fabrics_command) { |
Christoph Hellwig | a07b497 | 2016-06-21 18:04:20 +0200 | [diff] [blame] | 224 | pr_err("invalid command 0x%x on unconnected queue.\n", |
| 225 | cmd->fabrics.opcode); |
| 226 | return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; |
| 227 | } |
| 228 | if (cmd->fabrics.fctype != nvme_fabrics_type_connect) { |
| 229 | pr_err("invalid capsule type 0x%x on unconnected queue.\n", |
| 230 | cmd->fabrics.fctype); |
| 231 | return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; |
| 232 | } |
| 233 | |
| 234 | req->data_len = sizeof(struct nvmf_connect_data); |
| 235 | if (cmd->connect.qid == 0) |
| 236 | req->execute = nvmet_execute_admin_connect; |
| 237 | else |
| 238 | req->execute = nvmet_execute_io_connect; |
| 239 | return 0; |
| 240 | } |