/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

#define NVMET_FC_MAX_KB_PER_XFR		256

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	struct scatterlist		*next_sg;
	int				data_sg_cnt;
	u32				next_sg_offset;
	u32				total_length;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	u16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES];
	struct kref			ref;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
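
/*
 * Worked example of the ID helpers above (illustrative values only,
 * not from the FC-NVME spec): if the random association_id came out
 * as 0x1a2b3c4d5e6f0000, then for qid 3:
 *	nvmet_fc_makeconnid(assoc, 3)		== 0x1a2b3c4d5e6f0003
 *	nvmet_fc_getassociationid(connid)	== 0x1a2b3c4d5e6f0000
 *	nvmet_fc_getqueueid(connid)		== 3
 * and, per the note above, the admin queue (qid 0) connection id
 * equals the association id itself.
 */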

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	/* iod has walked off the front of the array; free the array base */
	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;
	unsigned long flags;

	spin_lock_irqsave(&queue->qlock, flags);
	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		fod->abort = false;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
	return fod;
}


static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->qlock, flags);
	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
	fod->active = false;
	spin_unlock_irqrestore(&queue->qlock, flags);

	/*
	 * release the reference taken at queue lookup and fod allocation
	 */
	nvmet_fc_tgt_q_put(queue);
}

static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (!(tgtport->ops->target_features &
			NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
	    tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}
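
/*
 * Illustration of the walk above (hypothetical topology): with active
 * cpus {0, 1, 2, 4} (cpu 3 offline), num_active_cpus() == 4, so qid 4
 * gives idx = (4 - 1) % 4 == 3 and the walk skips the inactive cpu 3,
 * returning cpu 4. qid 0 (the admin queue) always maps to idx 0, the
 * first active cpu.
 */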

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid >= NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
				GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmefc_tgt_fcp_req *fcpreq)
{
	int ret;

	fcpreq->op = NVMET_FCOP_ABORT;
	fcpreq->offset = 0;
	fcpreq->timeout = 0;
	fcpreq->transfer_length = 0;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->sg_cnt = 0;

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
	if (ret)
		/* should never reach here !! */
		WARN_ON(1);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			spin_unlock(&fod->flock);
		}
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_delete_target_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
			(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
			(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
		ret = VERR_CR_ASSOC_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_rqst)))
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (rqst->assoc_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				ELS_RJT_LOGIC,
				ELS_EXPL_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
			(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
			(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					ELS_RJT_PROT : ELS_RJT_LOGIC,
				ELS_EXPL_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue = NULL;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (assoc) {
			if (rqst->discon_cmd.scope ==
					FCNVME_DISCONN_CONNECTION) {
				queue = nvmet_fc_find_target_queue(tgtport,
						be64_to_cpu(
							rqst->discon_cmd.id));
				if (!queue) {
					nvmet_fc_tgt_a_put(assoc);
					ret = VERR_NO_CONN;
				}
			}
		} else
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					ELS_RJT_PROT : ELS_RJT_LOGIC,
				ELS_EXPL_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);


	/* are we to delete a Connection ID (queue) */
	if (queue) {
		int qid = queue->qid;

		nvmet_fc_delete_target_queue(queue);

		/* release the get taken by find_target_queue */
		nvmet_fc_tgt_q_put(queue);

		/* tear association down if the admin queue terminated */
		if (!qid)
			del_assoc = true;
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* be preventative: handlers will later set a valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
/*
 * Work-queue entry point for processing a received FC-NVME LS request
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:       pointer to a lsreq request structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);


/*
 * **********************
 * Start of FCP handling
 * **********************
 */

static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	u32 page_len, length;
	int i = 0;

	length = fod->total_length;
	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */

	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
out:
	return NVME_SC_INTERNAL;
}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	int count;

	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
			((fod->io_dir == NVMET_FCP_WRITE) ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE));
	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
		__free_page(sg_page(sg));
	kfree(fod->data_sg);
}


static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
| 1636 | |
| 1637 | /* |
| 1638 | * Prep RSP payload. |
 * May be an NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op.
| 1640 | */ |
| 1641 | static void |
| 1642 | nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
| 1643 | struct nvmet_fc_fcp_iod *fod) |
| 1644 | { |
| 1645 | struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; |
| 1646 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
| 1647 | struct nvme_completion *cqe = &ersp->cqe; |
| 1648 | u32 *cqewd = (u32 *)cqe; |
| 1649 | bool send_ersp = false; |
| 1650 | u32 rsn, rspcnt, xfr_length; |
| 1651 | |
| 1652 | if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) |
| 1653 | xfr_length = fod->total_length; |
| 1654 | else |
| 1655 | xfr_length = fod->offset; |
| 1656 | |
	/*
	 * Check whether we can send an all-zeros response.
	 * Note: to send an all-zeros response, the NVME-FC host transport
	 * will recreate the CQE. The host transport knows: sq id, SQHD
	 * (last seen in an ersp), and command_id. Thus it will create a
	 * zero-filled CQE with those known fields filled in. The transport
	 * must send an ersp for any condition where the cqe won't match
	 * this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  - every N responses, where N=ersp_ratio
	 *  - fabric commands are forced to send ersps (not required by
	 *    FC-NVME, but good practice)
	 *  - normal cmds: any time status is non-zero, or status is zero
	 *    but words 0 or 1 are non-zero
	 *  - the SQ is 90% or more full
	 *  - the cmd is a fused command
	 *  - transferred data length not equal to cmd iu length
	 */
| 1676 | rspcnt = atomic_inc_return(&fod->queue->zrspcnt); |
| 1677 | if (!(rspcnt % fod->queue->ersp_ratio) || |
| 1678 | sqe->opcode == nvme_fabrics_command || |
| 1679 | xfr_length != fod->total_length || |
| 1680 | (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || |
| 1681 | (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || |
| 1682 | queue_90percent_full(fod->queue, cqe->sq_head)) |
| 1683 | send_ersp = true; |
| 1684 | |
	/* reset the response fields */
| 1686 | fod->fcpreq->rspaddr = ersp; |
| 1687 | fod->fcpreq->rspdma = fod->rspdma; |
| 1688 | |
| 1689 | if (!send_ersp) { |
| 1690 | memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); |
| 1691 | fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; |
| 1692 | } else { |
| 1693 | ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); |
| 1694 | rsn = atomic_inc_return(&fod->queue->rsn); |
| 1695 | ersp->rsn = cpu_to_be32(rsn); |
| 1696 | ersp->xfrd_len = cpu_to_be32(xfr_length); |
| 1697 | fod->fcpreq->rsplen = sizeof(*ersp); |
| 1698 | } |
| 1699 | |
| 1700 | fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, |
| 1701 | sizeof(fod->rspiubuf), DMA_TO_DEVICE); |
| 1702 | } |
| 1703 | |
| 1704 | static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); |
| 1705 | |
| 1706 | static void |
| 1707 | nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
| 1708 | struct nvmet_fc_fcp_iod *fod) |
| 1709 | { |
| 1710 | int ret; |
| 1711 | |
| 1712 | fod->fcpreq->op = NVMET_FCOP_RSP; |
| 1713 | fod->fcpreq->timeout = 0; |
| 1714 | |
| 1715 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
| 1716 | |
| 1717 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
| 1718 | if (ret) |
| 1719 | nvmet_fc_abort_op(tgtport, fod->fcpreq); |
| 1720 | } |
| 1721 | |
| 1722 | static void |
| 1723 | nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, |
| 1724 | struct nvmet_fc_fcp_iod *fod, u8 op) |
| 1725 | { |
| 1726 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
| 1727 | struct scatterlist *sg, *datasg; |
| 1728 | u32 tlen, sg_off; |
| 1729 | int ret; |
| 1730 | |
| 1731 | fcpreq->op = op; |
| 1732 | fcpreq->offset = fod->offset; |
| 1733 | fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; |
| 1734 | tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024), |
| 1735 | (fod->total_length - fod->offset)); |
| 1736 | tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE); |
| 1737 | tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments |
| 1738 | * PAGE_SIZE); |
| 1739 | fcpreq->transfer_length = tlen; |
| 1740 | fcpreq->transferred_length = 0; |
| 1741 | fcpreq->fcp_error = 0; |
| 1742 | fcpreq->rsplen = 0; |
| 1743 | |
| 1744 | fcpreq->sg_cnt = 0; |
| 1745 | |
| 1746 | datasg = fod->next_sg; |
| 1747 | sg_off = fod->next_sg_offset; |
| 1748 | |
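	/*
	 * Build this transfer's scatterlist by copying entries from the
	 * command's data scatterlist, starting at the saved next_sg/
	 * next_sg_offset position. A partially consumed entry has its
	 * offset, length, and dma_address advanced past the bytes already
	 * transferred; the final entry is trimmed if the chunk ends inside
	 * it, and the resume position is saved for the next chunk.
	 */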
	for (sg = fcpreq->sg; tlen; sg++) {
| 1750 | *sg = *datasg; |
| 1751 | if (sg_off) { |
| 1752 | sg->offset += sg_off; |
| 1753 | sg->length -= sg_off; |
| 1754 | sg->dma_address += sg_off; |
| 1755 | sg_off = 0; |
| 1756 | } |
| 1757 | if (tlen < sg->length) { |
| 1758 | sg->length = tlen; |
| 1759 | fod->next_sg = datasg; |
| 1760 | fod->next_sg_offset += tlen; |
| 1761 | } else if (tlen == sg->length) { |
| 1762 | fod->next_sg_offset = 0; |
| 1763 | fod->next_sg = sg_next(datasg); |
| 1764 | } else { |
| 1765 | fod->next_sg_offset = 0; |
| 1766 | datasg = sg_next(datasg); |
| 1767 | } |
| 1768 | tlen -= sg->length; |
| 1769 | fcpreq->sg_cnt++; |
| 1770 | } |
| 1771 | |
	/*
	 * If this is the last READDATA request, check whether the LLDD
	 * supports combining the data transfer with the response.
	 */
| 1776 | if ((op == NVMET_FCOP_READDATA) && |
| 1777 | ((fod->offset + fcpreq->transfer_length) == fod->total_length) && |
| 1778 | (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { |
| 1779 | fcpreq->op = NVMET_FCOP_READDATA_RSP; |
| 1780 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
| 1781 | } |
| 1782 | |
| 1783 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
| 1784 | if (ret) { |
		/*
		 * Should be OK to set without the lock, as this runs in
		 * the thread of execution (not an async timer routine)
		 * and doesn't contend with any clearing action.
		 */
| 1790 | fod->abort = true; |
| 1791 | |
| 1792 | if (op == NVMET_FCOP_WRITEDATA) |
| 1793 | nvmet_req_complete(&fod->req, |
| 1794 | NVME_SC_FC_TRANSPORT_ERROR); |
| 1795 | else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { |
| 1796 | fcpreq->fcp_error = ret; |
| 1797 | fcpreq->transferred_length = 0; |
| 1798 | nvmet_fc_xmt_fcp_op_done(fod->fcpreq); |
| 1799 | } |
| 1800 | } |
| 1801 | } |
| 1802 | |
| 1803 | static void |
| 1804 | nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) |
| 1805 | { |
| 1806 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
| 1807 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 1808 | unsigned long flags; |
| 1809 | bool abort; |
| 1810 | |
| 1811 | spin_lock_irqsave(&fod->flock, flags); |
| 1812 | abort = fod->abort; |
| 1813 | spin_unlock_irqrestore(&fod->flock, flags); |
| 1814 | |
| 1815 | /* if in the middle of an io and we need to tear down */ |
| 1816 | if (abort && fcpreq->op != NVMET_FCOP_ABORT) { |
| 1817 | /* data no longer needed */ |
| 1818 | nvmet_fc_free_tgt_pgs(fod); |
| 1819 | |
| 1820 | if (fcpreq->fcp_error || abort) |
| 1821 | nvmet_req_complete(&fod->req, fcpreq->fcp_error); |
| 1822 | |
| 1823 | return; |
| 1824 | } |
| 1825 | |
| 1826 | switch (fcpreq->op) { |
| 1827 | |
| 1828 | case NVMET_FCOP_WRITEDATA: |
| 1829 | if (abort || fcpreq->fcp_error || |
| 1830 | fcpreq->transferred_length != fcpreq->transfer_length) { |
| 1831 | nvmet_req_complete(&fod->req, |
| 1832 | NVME_SC_FC_TRANSPORT_ERROR); |
| 1833 | return; |
| 1834 | } |
| 1835 | |
| 1836 | fod->offset += fcpreq->transferred_length; |
| 1837 | if (fod->offset != fod->total_length) { |
| 1838 | /* transfer the next chunk */ |
| 1839 | nvmet_fc_transfer_fcp_data(tgtport, fod, |
| 1840 | NVMET_FCOP_WRITEDATA); |
| 1841 | return; |
| 1842 | } |
| 1843 | |
| 1844 | /* data transfer complete, resume with nvmet layer */ |
| 1845 | |
| 1846 | fod->req.execute(&fod->req); |
| 1847 | |
| 1848 | break; |
| 1849 | |
| 1850 | case NVMET_FCOP_READDATA: |
| 1851 | case NVMET_FCOP_READDATA_RSP: |
| 1852 | if (abort || fcpreq->fcp_error || |
| 1853 | fcpreq->transferred_length != fcpreq->transfer_length) { |
| 1854 | /* data no longer needed */ |
| 1855 | nvmet_fc_free_tgt_pgs(fod); |
| 1856 | |
| 1857 | nvmet_fc_abort_op(tgtport, fod->fcpreq); |
| 1858 | return; |
| 1859 | } |
| 1860 | |
| 1861 | /* success */ |
| 1862 | |
| 1863 | if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { |
| 1864 | /* data no longer needed */ |
| 1865 | nvmet_fc_free_tgt_pgs(fod); |
| 1866 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, |
| 1867 | sizeof(fod->rspiubuf), DMA_TO_DEVICE); |
| 1868 | nvmet_fc_free_fcp_iod(fod->queue, fod); |
| 1869 | return; |
| 1870 | } |
| 1871 | |
| 1872 | fod->offset += fcpreq->transferred_length; |
| 1873 | if (fod->offset != fod->total_length) { |
| 1874 | /* transfer the next chunk */ |
| 1875 | nvmet_fc_transfer_fcp_data(tgtport, fod, |
| 1876 | NVMET_FCOP_READDATA); |
| 1877 | return; |
| 1878 | } |
| 1879 | |
| 1880 | /* data transfer complete, send response */ |
| 1881 | |
| 1882 | /* data no longer needed */ |
| 1883 | nvmet_fc_free_tgt_pgs(fod); |
| 1884 | |
| 1885 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
| 1886 | |
| 1887 | break; |
| 1888 | |
| 1889 | case NVMET_FCOP_RSP: |
| 1890 | case NVMET_FCOP_ABORT: |
| 1891 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, |
| 1892 | sizeof(fod->rspiubuf), DMA_TO_DEVICE); |
| 1893 | nvmet_fc_free_fcp_iod(fod->queue, fod); |
| 1894 | break; |
| 1895 | |
| 1896 | default: |
| 1897 | nvmet_fc_free_tgt_pgs(fod); |
| 1898 | nvmet_fc_abort_op(tgtport, fod->fcpreq); |
| 1899 | break; |
| 1900 | } |
| 1901 | } |
| 1902 | |
| 1903 | /* |
| 1904 | * actual completion handler after execution by the nvmet layer |
| 1905 | */ |
| 1906 | static void |
| 1907 | __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, |
| 1908 | struct nvmet_fc_fcp_iod *fod, int status) |
| 1909 | { |
| 1910 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
| 1911 | struct nvme_completion *cqe = &fod->rspiubuf.cqe; |
| 1912 | unsigned long flags; |
| 1913 | bool abort; |
| 1914 | |
| 1915 | spin_lock_irqsave(&fod->flock, flags); |
| 1916 | abort = fod->abort; |
| 1917 | spin_unlock_irqrestore(&fod->flock, flags); |
| 1918 | |
| 1919 | /* if we have a CQE, snoop the last sq_head value */ |
| 1920 | if (!status) |
| 1921 | fod->queue->sqhd = cqe->sq_head; |
| 1922 | |
| 1923 | if (abort) { |
| 1924 | /* data no longer needed */ |
| 1925 | nvmet_fc_free_tgt_pgs(fod); |
| 1926 | |
| 1927 | nvmet_fc_abort_op(tgtport, fod->fcpreq); |
| 1928 | return; |
| 1929 | } |
| 1930 | |
	/* if an error occurred handling the cmd after initial parsing */
| 1932 | if (status) { |
| 1933 | /* fudge up a failed CQE status for our transport error */ |
| 1934 | memset(cqe, 0, sizeof(*cqe)); |
| 1935 | cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ |
| 1936 | cqe->sq_id = cpu_to_le16(fod->queue->qid); |
| 1937 | cqe->command_id = sqe->command_id; |
| 1938 | cqe->status = cpu_to_le16(status); |
| 1939 | } else { |
| 1940 | |
		/*
		 * Try to push the data even if the SQE status is non-zero.
		 * There may be statuses for which data was still intended
		 * to be moved.
		 */
| 1946 | if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { |
| 1947 | /* push the data over before sending rsp */ |
| 1948 | nvmet_fc_transfer_fcp_data(tgtport, fod, |
| 1949 | NVMET_FCOP_READDATA); |
| 1950 | return; |
| 1951 | } |
| 1952 | |
| 1953 | /* writes & no data - fall thru */ |
| 1954 | } |
| 1955 | |
| 1956 | /* data no longer needed */ |
| 1957 | nvmet_fc_free_tgt_pgs(fod); |
| 1958 | |
| 1959 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
| 1960 | } |
| 1961 | |
| 1962 | |
| 1963 | static void |
| 1964 | nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) |
| 1965 | { |
| 1966 | struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); |
| 1967 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 1968 | |
| 1969 | __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); |
| 1970 | } |
| 1971 | |
| 1972 | |
/*
 * Actual processing routine for received FC-NVME FCP Requests from the LLDD
 */
| 1976 | void |
| 1977 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
| 1978 | struct nvmet_fc_fcp_iod *fod) |
| 1979 | { |
| 1980 | struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; |
| 1981 | int ret; |
| 1982 | |
	/*
	 * Fused commands are currently not supported in the Linux
	 * implementation.
	 *
	 * As such, the FC transport implementation does not inspect
	 * fused commands, nor does it hold delivery to the upper layer
	 * until both commands have arrived and can be ordered by CSN.
	 */
| 1991 | |
| 1992 | fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; |
| 1993 | |
| 1994 | fod->total_length = be32_to_cpu(cmdiu->data_len); |
| 1995 | if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { |
| 1996 | fod->io_dir = NVMET_FCP_WRITE; |
| 1997 | if (!nvme_is_write(&cmdiu->sqe)) |
| 1998 | goto transport_error; |
| 1999 | } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { |
| 2000 | fod->io_dir = NVMET_FCP_READ; |
| 2001 | if (nvme_is_write(&cmdiu->sqe)) |
| 2002 | goto transport_error; |
| 2003 | } else { |
| 2004 | fod->io_dir = NVMET_FCP_NODATA; |
| 2005 | if (fod->total_length) |
| 2006 | goto transport_error; |
| 2007 | } |
| 2008 | |
| 2009 | fod->req.cmd = &fod->cmdiubuf.sqe; |
| 2010 | fod->req.rsp = &fod->rspiubuf.cqe; |
| 2011 | fod->req.port = fod->queue->port; |
| 2012 | |
	/* cleared so that nvmet_req_init() must supply the execute callback */
| 2014 | fod->req.execute = NULL; |
| 2015 | |
| 2016 | /* clear any response payload */ |
| 2017 | memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); |
| 2018 | |
| 2019 | ret = nvmet_req_init(&fod->req, |
| 2020 | &fod->queue->nvme_cq, |
| 2021 | &fod->queue->nvme_sq, |
| 2022 | &nvmet_fc_tgt_fcp_ops); |
| 2023 | if (!ret) { /* bad SQE content */ |
| 2024 | nvmet_fc_abort_op(tgtport, fod->fcpreq); |
| 2025 | return; |
| 2026 | } |
| 2027 | |
| 2028 | /* keep a running counter of tail position */ |
| 2029 | atomic_inc(&fod->queue->sqtail); |
| 2030 | |
| 2031 | fod->data_sg = NULL; |
| 2032 | fod->data_sg_cnt = 0; |
| 2033 | if (fod->total_length) { |
| 2034 | ret = nvmet_fc_alloc_tgt_pgs(fod); |
| 2035 | if (ret) { |
| 2036 | nvmet_req_complete(&fod->req, ret); |
| 2037 | return; |
| 2038 | } |
| 2039 | } |
| 2040 | fod->req.sg = fod->data_sg; |
| 2041 | fod->req.sg_cnt = fod->data_sg_cnt; |
| 2042 | fod->offset = 0; |
| 2043 | fod->next_sg = fod->data_sg; |
| 2044 | fod->next_sg_offset = 0; |
| 2045 | |
| 2046 | if (fod->io_dir == NVMET_FCP_WRITE) { |
| 2047 | /* pull the data over before invoking nvmet layer */ |
| 2048 | nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); |
| 2049 | return; |
| 2050 | } |
| 2051 | |
| 2052 | /* |
| 2053 | * Reads or no data: |
| 2054 | * |
| 2055 | * can invoke the nvmet_layer now. If read data, cmd completion will |
| 2056 | * push the data |
| 2057 | */ |
| 2058 | |
| 2059 | fod->req.execute(&fod->req); |
| 2060 | |
| 2061 | return; |
| 2062 | |
| 2063 | transport_error: |
| 2064 | nvmet_fc_abort_op(tgtport, fod->fcpreq); |
| 2065 | } |
| 2066 | |
/*
 * Work-queue wrapper that processes a received FC-NVME FCP Request
 * from the LLDD.
 */
| 2070 | static void |
| 2071 | nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) |
| 2072 | { |
| 2073 | struct nvmet_fc_fcp_iod *fod = |
| 2074 | container_of(work, struct nvmet_fc_fcp_iod, work); |
| 2075 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 2076 | |
| 2077 | nvmet_fc_handle_fcp_rqst(tgtport, fod); |
| 2078 | } |
| 2079 | |
| 2080 | /** |
| 2081 | * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD |
 * upon the reception of an NVME FCP CMD IU.
| 2083 | * |
| 2084 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc |
| 2085 | * layer for processing. |
| 2086 | * |
| 2087 | * The nvmet-fc layer will copy cmd payload to an internal structure for |
| 2088 | * processing. As such, upon completion of the routine, the LLDD may |
| 2089 | * immediately free/reuse the CMD IU buffer passed in the call. |
| 2090 | * |
| 2091 | * If this routine returns error, the lldd should abort the exchange. |
| 2092 | * |
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to reference
 *               the FCP exchange corresponding to the command.
| 2097 | * @cmdiubuf: pointer to the buffer containing the FCP CMD IU |
| 2098 | * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU |
| 2099 | */ |
| 2100 | int |
| 2101 | nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, |
| 2102 | struct nvmefc_tgt_fcp_req *fcpreq, |
| 2103 | void *cmdiubuf, u32 cmdiubuf_len) |
| 2104 | { |
| 2105 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); |
| 2106 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; |
| 2107 | struct nvmet_fc_tgt_queue *queue; |
| 2108 | struct nvmet_fc_fcp_iod *fod; |
| 2109 | |
| 2110 | /* validate iu, so the connection id can be used to find the queue */ |
| 2111 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
| 2112 | (cmdiu->scsi_id != NVME_CMD_SCSI_ID) || |
| 2113 | (cmdiu->fc_id != NVME_CMD_FC_ID) || |
| 2114 | (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) |
| 2115 | return -EIO; |
| 2116 | |
| 2117 | |
| 2118 | queue = nvmet_fc_find_target_queue(tgtport, |
| 2119 | be64_to_cpu(cmdiu->connection_id)); |
| 2120 | if (!queue) |
| 2121 | return -ENOTCONN; |
| 2122 | |
	/*
	 * note: a reference was taken by find_target_queue.
	 * After successful fod allocation, the fod inherits ownership
	 * of that reference, which is released when the fod is freed.
	 */
| 2129 | |
| 2130 | fod = nvmet_fc_alloc_fcp_iod(queue); |
| 2131 | if (!fod) { |
| 2132 | /* release the queue lookup reference */ |
| 2133 | nvmet_fc_tgt_q_put(queue); |
| 2134 | return -ENOENT; |
| 2135 | } |
| 2136 | |
| 2137 | fcpreq->nvmet_fc_private = fod; |
| 2138 | fod->fcpreq = fcpreq; |
	/*
	 * Put all admin cmds on hw queue id 0. All I/O commands are
	 * spread across the remaining hw queues on a modulo basis
	 * (e.g., with 4 hw queues, qid 5 maps to hwqid 0).
	 */
| 2143 | fcpreq->hwqid = queue->qid ? |
| 2144 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; |
| 2145 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); |
| 2146 | |
| 2147 | queue_work_on(queue->cpu, queue->work_q, &fod->work); |
| 2148 | |
| 2149 | return 0; |
| 2150 | } |
| 2151 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); |
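/*
 * Example (illustrative sketch, not part of this file's API): an LLDD
 * might pass a received FCP CMD IU to nvmet-fc as below, aborting the
 * exchange on error as the kernel-doc above requires. The example_lldd_*
 * names and structures are invented for illustration only.
 *
 *	static void
 *	example_lldd_recv_fcp_cmd(struct example_lldd_port *lport,
 *			struct example_lldd_exchange *xchg,
 *			void *cmdiu, u32 cmdiu_len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(lport->targetport, &xchg->fcp_req,
 *					cmdiu, cmdiu_len);
 *		if (ret)
 *			example_lldd_abort_exchange(xchg);
 *	}
 */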
| 2152 | |
| 2153 | enum { |
| 2154 | FCT_TRADDR_ERR = 0, |
| 2155 | FCT_TRADDR_WWNN = 1 << 0, |
| 2156 | FCT_TRADDR_WWPN = 1 << 1, |
| 2157 | }; |
| 2158 | |
| 2159 | struct nvmet_fc_traddr { |
| 2160 | u64 nn; |
| 2161 | u64 pn; |
| 2162 | }; |
| 2163 | |
| 2164 | static const match_table_t traddr_opt_tokens = { |
| 2165 | { FCT_TRADDR_WWNN, "nn-%s" }, |
| 2166 | { FCT_TRADDR_WWPN, "pn-%s" }, |
| 2167 | { FCT_TRADDR_ERR, NULL } |
| 2168 | }; |
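/*
 * Example (values invented for illustration): a configfs traddr string of
 * "nn-0x10000090fae0b5f5,pn-0x10000090fae0b5f6" parses to
 * traddr->nn = 0x10000090fae0b5f5 and traddr->pn = 0x10000090fae0b5f6.
 * Tokens are comma-separated, and the 0x prefix selects hexadecimal.
 */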
| 2169 | |
| 2170 | static int |
| 2171 | nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf) |
| 2172 | { |
| 2173 | substring_t args[MAX_OPT_ARGS]; |
| 2174 | char *options, *o, *p; |
| 2175 | int token, ret = 0; |
| 2176 | u64 token64; |
| 2177 | |
| 2178 | options = o = kstrdup(buf, GFP_KERNEL); |
| 2179 | if (!options) |
| 2180 | return -ENOMEM; |
| 2181 | |
| 2182 | while ((p = strsep(&o, ",\n")) != NULL) { |
| 2183 | if (!*p) |
| 2184 | continue; |
| 2185 | |
| 2186 | token = match_token(p, traddr_opt_tokens, args); |
| 2187 | switch (token) { |
| 2188 | case FCT_TRADDR_WWNN: |
| 2189 | if (match_u64(args, &token64)) { |
| 2190 | ret = -EINVAL; |
| 2191 | goto out; |
| 2192 | } |
| 2193 | traddr->nn = token64; |
| 2194 | break; |
| 2195 | case FCT_TRADDR_WWPN: |
| 2196 | if (match_u64(args, &token64)) { |
| 2197 | ret = -EINVAL; |
| 2198 | goto out; |
| 2199 | } |
| 2200 | traddr->pn = token64; |
| 2201 | break; |
| 2202 | default: |
| 2203 | pr_warn("unknown traddr token or missing value '%s'\n", |
| 2204 | p); |
| 2205 | ret = -EINVAL; |
| 2206 | goto out; |
| 2207 | } |
| 2208 | } |
| 2209 | |
| 2210 | out: |
| 2211 | kfree(options); |
| 2212 | return ret; |
| 2213 | } |
| 2214 | |
| 2215 | static int |
| 2216 | nvmet_fc_add_port(struct nvmet_port *port) |
| 2217 | { |
| 2218 | struct nvmet_fc_tgtport *tgtport; |
| 2219 | struct nvmet_fc_traddr traddr = { 0L, 0L }; |
| 2220 | unsigned long flags; |
| 2221 | int ret; |
| 2222 | |
| 2223 | /* validate the address info */ |
| 2224 | if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || |
| 2225 | (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) |
| 2226 | return -EINVAL; |
| 2227 | |
| 2228 | /* map the traddr address info to a target port */ |
| 2229 | |
| 2230 | ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr); |
| 2231 | if (ret) |
| 2232 | return ret; |
| 2233 | |
| 2234 | ret = -ENXIO; |
| 2235 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 2236 | list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { |
| 2237 | if ((tgtport->fc_target_port.node_name == traddr.nn) && |
| 2238 | (tgtport->fc_target_port.port_name == traddr.pn)) { |
			/* an FC port can map to only one nvmet port id */
| 2240 | if (!tgtport->port) { |
| 2241 | tgtport->port = port; |
| 2242 | port->priv = tgtport; |
| 2243 | ret = 0; |
| 2244 | } else |
| 2245 | ret = -EALREADY; |
| 2246 | break; |
| 2247 | } |
| 2248 | } |
| 2249 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
| 2250 | return ret; |
| 2251 | } |
| 2252 | |
| 2253 | static void |
| 2254 | nvmet_fc_remove_port(struct nvmet_port *port) |
| 2255 | { |
| 2256 | struct nvmet_fc_tgtport *tgtport = port->priv; |
| 2257 | unsigned long flags; |
| 2258 | |
| 2259 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 2260 | if (tgtport->port == port) { |
| 2261 | nvmet_fc_tgtport_put(tgtport); |
| 2262 | tgtport->port = NULL; |
| 2263 | } |
| 2264 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
| 2265 | } |
| 2266 | |
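/*
 * nvmet fabrics ops for the FC transport, registered with the nvmet core
 * at module init so that FC ports can be bound via configfs (add_port)
 * and command completions are routed back through queue_response.
 */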
| 2267 | static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { |
| 2268 | .owner = THIS_MODULE, |
| 2269 | .type = NVMF_TRTYPE_FC, |
| 2270 | .msdbd = 1, |
| 2271 | .add_port = nvmet_fc_add_port, |
| 2272 | .remove_port = nvmet_fc_remove_port, |
| 2273 | .queue_response = nvmet_fc_fcp_nvme_cmd_done, |
| 2274 | .delete_ctrl = nvmet_fc_delete_ctrl, |
| 2275 | }; |
| 2276 | |
| 2277 | static int __init nvmet_fc_init_module(void) |
| 2278 | { |
| 2279 | return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); |
| 2280 | } |
| 2281 | |
| 2282 | static void __exit nvmet_fc_exit_module(void) |
| 2283 | { |
	/* sanity check - all targetports should have been removed */
| 2285 | if (!list_empty(&nvmet_fc_target_list)) |
| 2286 | pr_warn("%s: targetport list not empty\n", __func__); |
| 2287 | |
| 2288 | nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); |
| 2289 | |
| 2290 | ida_destroy(&nvmet_fc_tgtport_cnt); |
| 2291 | } |
| 2292 | |
| 2293 | module_init(nvmet_fc_init_module); |
| 2294 | module_exit(nvmet_fc_exit_module); |
| 2295 | |
| 2296 | MODULE_LICENSE("GPL v2"); |