/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				total_length;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;
	struct work_struct		done_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * The Association ID will have a random number in the upper 6 bytes
 * and zero in the lower 2 bytes
 *
 * Connection IDs will be the Association ID with the QID or'd into the
 * lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
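/*
 * Worked example (values are illustrative only): for an association id
 * of 0x1234567890ab0000, nvmet_fc_makeconnid(assoc, 3) below yields
 * connection id 0x1234567890ab0003; nvmet_fc_getassociationid()
 * recovers 0x1234567890ab0000 and nvmet_fc_getqueueid() recovers 3.
 */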
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}


/* *********************** FC-NVME Port Management ************************ */


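/*
 * Allocate the fixed pool of NVMET_LS_CTX_COUNT LS request/response
 * contexts for a target port and DMA-map each response buffer for
 * later transmission.
 */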
static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	kfree(iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

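/*
 * Pre-initialize a queue's array of FCP I/O descriptors (fods) and
 * DMA-map each response IU buffer; on a mapping failure, unwind the
 * already-prepared entries.
 */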
static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


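/*
 * Route a received FCP command to an LLDD hardware queue, then either
 * hand it to the queue's work queue (when commands arrive in ISR
 * context) or process it inline.
 */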
static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands are
	 * distributed across the remaining hw queues on a modulo basis.
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
		queue_work_on(queue->cpu, queue->work_q, &fod->work);
	else
		nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
512
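/*
 * Release an FCP I/O descriptor back to its queue, or immediately
 * re-arm it with a command the LLDD had to defer because no descriptor
 * was available at receive time.
 */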
static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Release reference taken at queue lookup and fod allocation */
		nvmet_fc_tgt_q_put(queue);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */
}
578
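/*
 * Pick the CPU a queue's work items should run on: qid selects the
 * idx'th active CPU, so queues spread roughly evenly across CPUs.
 */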
static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}
602
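/*
 * Allocate a target queue (with its fod array, sized by sqsize,
 * appended to the same allocation), its workqueue, and the nvmet
 * submission queue, then link it into the association's queue table.
 */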
static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
			(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
			GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


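/*
 * Tear down a target queue: mark it disconnected, abort outstanding
 * write-data I/Os, drain the deferred command lists, and drop the
 * queue reference.
 */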
static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

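/*
 * Create a new association: take a tgtport reference, allocate an
 * index, and loop picking a random id (upper 6 bytes) until one is
 * found that is unique on this target port.
 */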
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}


/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
					template->max_sgl_segments);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);

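/*
 * Illustrative LLDD registration sketch (not part of this file; wwnn,
 * wwpn, did, lldd_tgt_template and pdev are placeholders, error
 * handling elided):
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_id   = did,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   &pdev->dev, &targetport);
 */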
1025
static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}
1064
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_delete_target_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

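/*
 * Handle a Create Association LS: validate the request, allocate the
 * association and its admin queue (qid 0), and format an accept or
 * reject response.
 */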
static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
			(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
			(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for the Create Association Cmd
	 * descriptor, as the specified length was incorrect.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignoring how long the
	 * trailing pad length is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}
1338
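/*
 * Handle a Create Connection LS: validate the request, look up the
 * association, allocate the IO queue, and format an accept or reject
 * response.
 */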
static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
			(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
			(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}
1426
static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue = NULL;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (assoc) {
			if (rqst->discon_cmd.scope ==
					FCNVME_DISCONN_CONNECTION) {
				queue = nvmet_fc_find_target_queue(tgtport,
						be64_to_cpu(
							rqst->discon_cmd.id));
				if (!queue) {
					nvmet_fc_tgt_a_put(assoc);
					ret = VERR_NO_CONN;
				}
			}
		} else
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);


	/* are we to delete a Connection ID (queue) */
	if (queue) {
		int qid = queue->qid;

		nvmet_fc_delete_target_queue(queue);

		/* release the get taken by find_target_queue */
		nvmet_fc_tgt_q_put(queue);

		/* tear association down if admin queue (qid 0) terminated */
		if (!qid)
			del_assoc = true;
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative; handlers will later set to a valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
			container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of an NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:       pointer to a lsreq request structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);


/*
 * **********************
 * Start of FCP handling
 * **********************
 */

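/*
 * Allocate and DMA-map a page-per-entry scatterlist large enough to
 * hold the command's total transfer length; direction follows the
 * initiator's view of the I/O.
 */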
static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	u32 page_len, length;
	int i = 0;

	length = fod->total_length;
	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */

	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
out:
	return NVME_SC_INTERNAL;
}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	int count;

	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
		__free_page(sg_page(sg));
	kfree(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}
1748
1749static bool
1750queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1751{
1752 u32 sqtail, used;
1753
1754 /* egad, this is ugly. And sqtail is just a best guess */
1755 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1756
1757 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1758 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1759}

/*
 * Prep RSP payload.
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->total_length;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *    but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    sqe->opcode == nvme_fabrics_command ||
	    xfr_length != fod->total_length ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
1827
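/*
 * Sizing sketch for the two response forms above (assuming the
 * nvme_fc_ersp_iu layout in include/linux/nvme-fc.h): a full ERSP is
 * sizeof(*ersp) bytes and iu_len carries that length in 32-bit words,
 * so e.g. a 32-byte ERSP IU is encoded as iu_len = 8. The 0's response
 * instead sends only the first NVME_FC_SIZEOF_ZEROS_RSP bytes, and the
 * host rebuilds the CQE from the sq id, last-seen SQHD and command_id.
 */
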
static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);

static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	/*
	 * if an ABTS was received or we issued the fcp_abort early
	 * don't call abort routine again.
	 */
	/* no need to take lock - lock was taken earlier to get here */
	if (!fod->aborted)
		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);

	nvmet_fc_free_fcp_iod(fod->queue, fod);
}

static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod);
}

static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	unsigned long flags;
	u32 tlen;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
			(fod->total_length - fod->offset));
	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as it's in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}

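/*
 * A worked example of the chunking above (hypothetical values): with
 * PAGE_SIZE = 4K, max_sg_cnt = 256 and a 3MB write at offset 0,
 * tlen = min(256 * 4K, 3MB - 0) = 1MB, sg starts at data_sg[0] and
 * sg_cnt = DIV_ROUND_UP(1MB, 4K) = 256. Each WRITEDATA completion
 * advances fod->offset by the transferred length, so the io moves in
 * (at most) 1MB chunks until offset == total_length.
 */
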
static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	/* if in the middle of an io and we need to tear down */
	if (abort) {
		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return true;
		}

		nvmet_fc_abort_op(tgtport, fod);
		return true;
	}

	return false;
}

/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock(&fod->flock);
			fod->abort = true;
			spin_unlock(&fod->flock);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->total_length) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */

		fod->req.execute(&fod->req);

		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->total_length) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}

static void
nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, done_work);

	nvmet_fc_fod_op_done(fod);
}

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue = fod->queue;

	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
		/* context switch so completion is not in ISR context */
		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
	else
		nvmet_fc_fod_op_done(fod);
}

/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}


static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}


/*
 * Actual processing routine for received FC-NVME FCP commands from
 * the LLDD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	int ret;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both based on csn.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	fod->total_length = be32_to_cpu(cmdiu->data_len);
	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (fod->total_length)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.rsp = &fod->rspiubuf.cqe;
	fod->req.port = fod->queue->port;

	/* ensure nvmet handlers will set cmd handler callback */
	fod->req.execute = NULL;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->total_length) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */

	fod->req.execute(&fod->req);

	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}

2220
2221/*
2222 * Actual processing routine for received FC-NVME LS Requests from the LLD
2223 */
2224static void
2225nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2226{
2227 struct nvmet_fc_fcp_iod *fod =
2228 container_of(work, struct nvmet_fc_fcp_iod, work);
2229 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2230
2231 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2232}
2233
/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of a NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, due to the packetized nature of FC and the api of the FC
 * LLDD - which may issue a hw command to send the response, yet not
 * receive the hw completion for that command and upcall the nvmet_fc
 * layer before a new command is asynchronously received - it's
 * possible for a command to arrive before the LLDD and nvmet_fc have
 * recycled the job structure. This gives the appearance of more
 * commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD IU buffer information are remembered, and
 * the routine returns a -EOVERFLOW status. Subsequently, when a queue
 * job structure is freed, it is immediately reallocated for anything
 * on the pending request list. The LLDD's defer_rcv() callback is
 * called, informing the LLDD that it may reuse the CMD IU buffer, and
 * the io is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to
 *               reference the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
	    (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
	    (cmdiu->fc_id != NVME_CMD_FC_ID) ||
	    (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);

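/*
 * A minimal sketch of how an LLDD might drive the entry point above
 * (the lport/exch names and lldd_abort_exchange() are hypothetical,
 * illustrative only - not part of this transport's API):
 *
 *	ret = nvmet_fc_rcv_fcp_req(lport->targetport, &exch->tgt_fcp_req,
 *				   cmdiu_buf, cmdiu_len);
 *	switch (ret) {
 *	case 0:
 *		break;		// transport owns io; CMD IU buffer reusable
 *	case -EOVERFLOW:
 *		break;		// deferred; hold the CMD IU buffer until
 *				// defer_rcv() is called for this request
 *	default:
 *		lldd_abort_exchange(exch);	// not accepted
 *		break;
 *	}
 */
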
/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                          upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the LLDD's fcp_req_release() callback to
 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to the fcpreq request structure that corresponds
 *               to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);


struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWNs from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
	    !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
	    !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
			"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
	    !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
	    !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
			"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

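/*
 * Example traddr strings the above accepts (WWN values hypothetical,
 * assuming the usual ':' separator between the nn- and pn- fields):
 *
 *	"nn-0x20000090fac7e5b0:pn-0x10000090fac7e5b0"	(0x-prefixed form)
 *	"nn-20000090fac7e5b0:pn-10000090fac7e5b0"	(short form)
 *
 * In both cases each 16-hexdigit WWN is copied into the local
 * "0x"-prefixed scratch string and handed to __nvme_fc_parse_u64() /
 * match_u64().
 */
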
static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->port) {
				tgtport->port = port;
				port->priv = tgtport;
				nvmet_fc_tgtport_get(tgtport);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
	return ret;
}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport = port->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (tgtport->port == port) {
		nvmet_fc_tgtport_put(tgtport);
		tgtport->port = NULL;
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

2541
2542static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2543 .owner = THIS_MODULE,
2544 .type = NVMF_TRTYPE_FC,
2545 .msdbd = 1,
2546 .add_port = nvmet_fc_add_port,
2547 .remove_port = nvmet_fc_remove_port,
2548 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2549 .delete_ctrl = nvmet_fc_delete_ctrl,
2550};
2551
2552static int __init nvmet_fc_init_module(void)
2553{
2554 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2555}
2556
2557static void __exit nvmet_fc_exit_module(void)
2558{
2559 /* sanity check - all lports should be removed */
2560 if (!list_empty(&nvmet_fc_target_list))
2561 pr_warn("%s: targetport list not empty\n", __func__);
2562
2563 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2564
2565 ida_destroy(&nvmet_fc_tgtport_cnt);
2566}
2567
2568module_init(nvmet_fc_init_module);
2569module_exit(nvmet_fc_exit_module);
2570
2571MODULE_LICENSE("GPL v2");