/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
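
/*
 * Illustrative sizing note: with 4K pages, NVMET_FC_MAX_XFR_SGENTS
 * works out to 64 scatter/gather entries per 256KB sequence.
 */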

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;
	struct work_struct		done_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * The Association ID will have a random number in its upper 6 bytes
 * and zero in its lower 2 bytes.
 *
 * Connection IDs will be the Association ID with the QID or'd into
 * the lower 2 bytes.
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
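
/*
 * Worked example (illustrative values only): given an association_id of
 * 0x1122334455660000, nvmet_fc_makeconnid(assoc, 3) yields the
 * connection id 0x1122334455660003; nvmet_fc_getassociationid() then
 * recovers 0x1122334455660000 and nvmet_fc_getqueueid() recovers 3.
 */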

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
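
/*
 * Illustrative note: fcloop registers its target port with a NULL dev,
 * so fc_dma_map_single() above simply returns 0 and fc_dma_map_sg()
 * falls back to fc_map_sg(), leaving every dma_address at 0 instead of
 * dereferencing a device that does not exist.
 */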


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	kfree(iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * Put all admin cmds on hw queue id 0. All io commands go to
	 * their respective hw queue on a modulo basis.
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
		queue_work_on(queue->cpu, queue->work_q, &fod->work);
	else
		nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
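
/*
 * Illustrative mapping: with an LLDD reporting max_hw_queues == 4, io
 * queues qid 1..8 land on hwqid 0,1,2,3,0,1,2,3 in turn, while the
 * admin queue (qid 0) is always placed on hwqid 0.
 */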

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Release reference taken at queue lookup and fod allocation */
		nvmet_fc_tgt_q_put(queue);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

	/*
	 * Leave in place the queue lookup get reference taken when the
	 * fod was originally allocated; it now covers the reused fod.
	 */
}

static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}
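
/*
 * Illustrative selection: with 4 active cpus, io queues qid 1..8 pick
 * the 1st, 2nd, 3rd, 4th, 1st, ... active cpu; the admin queue (qid 0)
 * always picks the first active cpu.
 */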

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
			(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
			GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}

static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, target port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
					template->max_sgl_segments);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			schedule_work(&assoc->del_work);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */

static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
			(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
			(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for the Create Association Cmd
	 * descriptor because the spec's stated pad length was incorrect.
	 * Accept anything of "minimum" length. Assume the format per the
	 * 1.15 spec (with HOSTID reduced to 16 bytes), and ignore how
	 * long the trailing pad is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
			(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
			(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue = NULL;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
		 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (assoc) {
			if (rqst->discon_cmd.scope ==
					FCNVME_DISCONN_CONNECTION) {
				queue = nvmet_fc_find_target_queue(tgtport,
						be64_to_cpu(
							rqst->discon_cmd.id));
				if (!queue) {
					nvmet_fc_tgt_a_put(assoc);
					ret = VERR_NO_CONN;
				}
			}
		} else
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);


	/* are we to delete a Connection ID (queue) */
	if (queue) {
		int qid = queue->qid;

		nvmet_fc_delete_target_queue(queue);

		/* release the get taken by find_target_queue */
		nvmet_fc_tgt_q_put(queue);

		/* tear association down if admin queue (qid 0) terminated */
		if (!qid)
			del_assoc = true;
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventive: handlers will later set this to a valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * Work context wrapper that invokes the LS request handler above.
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:       pointer to a lsreq request structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
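
/*
 * Illustrative LLDD call site (hypothetical driver fields, sketch only):
 *
 *	ret = nvmet_fc_rcv_ls_req(lport->targetport, &ls->lsreq,
 *				  ls->payload, ls->payload_len);
 *	if (ret)
 *		... abort the LS exchange ...
 */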


/*
 * **********************
 * Start of FCP handling
 * **********************
 */

static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	u32 page_len, length;
	int i = 0;

	length = fod->req.transfer_len;
	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */

	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
out:
	return NVME_SC_INTERNAL;
}
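
/*
 * Illustrative sizing: a 10KB transfer on a 4K-page system yields
 * nent == 3 and pages of 4096, 4096 and 2048 bytes in the scatterlist.
 */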

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	int count;

	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
		__free_page(sg_page(sg));
	kfree(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}


static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
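
/*
 * Worked example (illustrative): with sqsize == 32 the threshold is
 * used * 10 >= 31 * 9 = 279, so the queue reports "90% full" once 28
 * or more entries are in use.
 */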

/*
 * Prep RSP payload.
 * May be an NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->req.transfer_len;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *    but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    sqe->opcode == nvme_fabrics_command ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
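
/*
 * Example of the ersp decision above: with ersp_ratio = 8, every 8th
 * response on the queue carries a full ersp even when the CQE is all
 * zeros; in between, a clean command that moved exactly transfer_len
 * bytes with a zero CQE goes out as the short NVME_FC_SIZEOF_ZEROS_RSP
 * payload and the host transport reconstructs the CQE itself.
 */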

static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);

static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	/*
	 * if an ABTS was received or we issued the fcp_abort early,
	 * don't call the abort routine again.
	 */
	/* no need to take lock - lock was taken earlier to get here */
	if (!fod->aborted)
		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);

	nvmet_fc_free_fcp_iod(fod->queue, fod);
}

static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod);
}

static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	unsigned long flags;
	u32 tlen;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
			(fod->req.transfer_len - fod->offset));
	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as it's in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}
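
/*
 * Chunking sketch for the transfer routine above (numbers illustrative):
 * with max_sg_cnt = 64 and 4KB pages, tlen caps each op at 256KB, so a
 * 1MB read is issued as four READDATA ops at offsets 0, 256K, 512K and
 * 768K, with the last op possibly upgraded to READDATA_RSP by the check
 * above when the LLDD advertises NVMET_FCTGTFEAT_READDATA_RSP.
 */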

static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	/* if in the middle of an io and we need to tear down */
	if (abort) {
		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return true;
		}

		nvmet_fc_abort_op(tgtport, fod);
		return true;
	}

	return false;
}

/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock(&fod->flock);
			fod->abort = true;
			spin_unlock(&fod->flock);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		nvmet_req_execute(&fod->req);
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}

static void
nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, done_work);

	nvmet_fc_fod_op_done(fod);
}

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue = fod->queue;

	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
		/* context switch so completion is not in ISR context */
		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
	else
		nvmet_fc_fod_op_done(fod);
}
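
/*
 * Template sketch (hypothetical LLDD "xlldd"): a driver whose fcp_op
 * completions arrive in hard-IRQ context opts into the workqueue bounce
 * above via its target template, e.g.
 *
 *	static struct nvmet_fc_target_template xlldd_tgt_template = {
 *		...
 *		.target_features = NVMET_FCTGTFEAT_OPDONE_IN_ISR,
 *	};
 *
 * whereas a driver completing from thread context can leave the flag
 * clear and have nvmet_fc_fod_op_done() run directly in its completion
 * path.
 */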

/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error occurred handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}


static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}


/*
 * Actual processing routine for received FC-NVME FCP Requests from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	int ret;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the FC transport does not look at fused commands,
	 * nor does it hold delivery to the upper layer until both
	 * halves of the pair have arrived, ordered by csn.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	fod->req.transfer_len = be32_to_cpu(cmdiu->data_len);
	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (fod->req.transfer_len)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.rsp = &fod->rspiubuf.cqe;
	fod->req.port = fod->queue->port;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
	nvmet_req_execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}

/*
 * Work wrapper that hands received FC-NVME FCP Requests to the routine above
 */
static void
nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, work);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of an NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, in some circumstances - due to the packetized nature of FC,
 * and because the LLDD may issue a hw command to send the response but
 * not see its hw completion before a new command is asynchronously
 * received - it's possible for a command to arrive before the LLDD and
 * nvmet_fc have recycled the job structure. This gives the appearance
 * of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated, the
 * LLDD request and CMD IU buffer information are remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately reallocated for anything on the
 * pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to reference
 *               the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
	    (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
	    (cmdiu->fc_id != NVME_CMD_FC_ID) ||
	    (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen  = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
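
/*
 * Usage sketch (illustrative only; "xlldd" and its helpers are hypothetical):
 * on receipt of an FCP CMD IU frame, the LLDD hands it to the transport and
 * keys off the return code.
 *
 *	ret = nvmet_fc_rcv_fcp_req(xlldd->targetport, &xio->tgt_fcp_req,
 *				   frame_payload, frame_len);
 *	switch (ret) {
 *	case 0:		 // transport copied the CMD IU; buffer reusable now
 *		break;
 *	case -EOVERFLOW: // accepted, but the buffer must be held until the
 *		break;	 // defer_rcv() callback fires for this fcpreq
 *	default:	 // not accepted; LLDD should abort the exchange
 *		xlldd_abort_exchange(xio);
 *	}
 */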

/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                          upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the template_ops->fcp_req_release() callback
 * to return the i/o context to the LLDD. The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to the fcpreq request structure that corresponds
 *               to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
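
/*
 * Flow sketch (illustrative, hypothetical LLDD): on seeing an ABTS for a
 * live exchange, the LLDD first notifies the transport, then answers the
 * ABTS once any outstanding op work has been terminated:
 *
 *	nvmet_fc_rcv_fcp_abort(xlldd->targetport, &xio->tgt_fcp_req);
 *	xlldd_send_ba_acc(xio);	// or defer until fcp_req_release()
 */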


struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate that the string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
	    !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
	    !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
		     "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
				NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
		    !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
		    !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
			     "pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->port) {
				tgtport->port = port;
				port->priv = tgtport;
				nvmet_fc_tgtport_get(tgtport);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
	return ret;
}
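
/*
 * Configuration sketch (assumes the standard nvmet configfs layout; the
 * port id and WWNs are illustrative):
 *
 *	cd /sys/kernel/config/nvmet/ports/1
 *	echo fc > addr_trtype
 *	echo fc > addr_adrfam
 *	echo "nn-0x20000090fa943b13:pn-0x10000090fa943b13" > addr_traddr
 *
 * When a subsystem is later linked under ports/1/subsystems/, the nvmet
 * core enables the port and calls nvmet_fc_add_port() above, which
 * matches the parsed WWNs against the registered targetports.
 */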

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport = port->priv;
	unsigned long flags;
	bool matched = false;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (tgtport->port == port) {
		matched = true;
		tgtport->port = NULL;
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	if (matched)
		nvmet_fc_tgtport_put(tgtport);
}

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all targetports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");