/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved.  The term     *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);

void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
		return;
	}
	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for NVME LS commands and
 * frees the memory resources used for those commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (status == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: NVMET context buffer to recycle
 *
 * Description: Frees the given buffer by reposting it to its associated RQ
 * so it can be reused, after cleaning up the receive context tied to it.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;
	int cpu;
	unsigned long iflag;

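	/* Return any TXRDY payload buffer to its DMA pool before recycling */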
	if (ctxp->txrdy) {
		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

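	/*
	 * If a received command is waiting for a free context (no context
	 * was available when its RQ buffer arrived), hand this freed
	 * context directly to the oldest waiter instead of returning it
	 * to the free pool.
	 */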
	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		payload = (uint32_t *)(nvmebuf->dbuf.virt);
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_cmd_nvme = ktime_get_ns();
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);
		/*
		 * The calling sequence should be:
		 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
		 * from the NVME command / FC header has been stored.
		 * A buffer has already been reposted for this IO, so just free
		 * the nvmebuf.
		 */
		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
					  payload, size);

		/* Process FCP command */
		if (rc == 0) {
			atomic_inc(&tgtp->rcv_fcp_cmd_out);
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			return;
		}

		/* Processing of FCP command is deferred */
		if (rc == -EOVERFLOW) {
			lpfc_nvmeio_data(phba,
					 "NVMET RCV BUSY: xri x%x sz %d "
					 "from %06x\n",
					 oxid, size, sid);
			/* defer repost rcv buffer till .defer_rcv callback */
			ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
			atomic_inc(&tgtp->rcv_fcp_cmd_out);
			return;
		}
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
				ctxp->oxid, rc,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	cpu = smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for NVME FCP commands and
 * frees the memory resources used for those commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (status == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
				ctxp->oxid, status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6703 CPU Check cmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_cmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
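		/*
		 * More ops remain on this exchange; clear the WQE past
		 * iocb_flag so no stale fields carry over when this
		 * iocbq is reused for the next submission.
		 */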
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_ccmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
	}
}

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

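	/* Build an XMIT_SEQUENCE WQE addressing the LS response payload */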
	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = NULL;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}
	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_iocbq *nvmewqeq;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = smp_processor_id();

		ctxp->cpu = id;
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[id]++;
		if (rsp->hwqid != id) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
			ctxp->cpu = rsp->hwqid;
		}
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO xri x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

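	/* Mark the IO outstanding before posting the WQE so the
	 * completion and abort paths see a consistent state.
	 */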
	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->tport_unreg_done);
}

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

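	/* Serialize the state transition against the completion
	 * and XRI-aborted paths.
	 */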
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & LPFC_NVMET_XBUSY) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	/* A state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not started processing it
	 * (by issuing any IO WQEs on this exchange yet).
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	if (ctxp->state != LPFC_NVMET_STE_DONE &&
	    ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, smp_processor_id());

	tgtp = phba->targetport->private;
	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
	if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
	else
		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv = lpfc_nvmet_defer_rcv,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle through the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
			infop++; /* next */
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents an MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
			infop++;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba, 0, j);
		for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVMET_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* Word 7 */
		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
		bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, idx, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq)
			idx = 0;
	}

	infop = phba->sli4_hba.nvmet_ctx_info;
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu %p\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
			infop++;
		}
	}
	return 0;
}

int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* Limit to LPFC_MAX_NVME_SEG_CNT.
	 * For now need + 1 to get around NVME transport logic.
	 */
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
				"6400 Reducing sg segment cnt to %d\n",
				LPFC_MAX_NVME_SEG_CNT);
		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
	} else {
		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

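	/* Find the receive context tied to this XRI on the aborted list */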
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid %x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint16_t xri;
	unsigned long iflag = 0;

	xri = be16_to_cpu(fc_hdr->fh_ox_id);

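	/* Check whether the ABTS matches an exchange we are still aborting */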
1394 spin_lock_irqsave(&phba->hbalock, iflag);
1395 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1396 list_for_each_entry_safe(ctxp, next_ctxp,
1397 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1398 list) {
James Smart6c621a22017-05-15 15:20:45 -07001399 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
James Smart86c67372017-04-21 16:05:04 -07001400 continue;
1401
1402 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1403 spin_unlock_irqrestore(&phba->hbalock, iflag);
1404
1405 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1406 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1407 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1408
1409 lpfc_nvmeio_data(phba,
1410 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1411 xri, smp_processor_id(), 0);
1412
1413 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1414 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1415
1416 rsp = &ctxp->ctx.fcp_req;
1417 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1418
1419 /* Respond with BA_ACC accordingly */
1420 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1421 return 0;
1422 }
1423 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1424 spin_unlock_irqrestore(&phba->hbalock, iflag);
1425
1426 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1427 xri, smp_processor_id(), 1);
1428
1429 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1430 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1431
1432 /* Respond with BA_RJT accordingly */
1433 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
James Smart86c67372017-04-21 16:05:04 -07001434#endif
Jens Axboeb06e13c2017-04-27 11:33:01 -06001435 return 0;
James Smart318083a2017-03-04 09:30:30 -08001436}
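
/*
 * For reference, a sketch of the two call sites above: the last argument
 * to lpfc_sli4_seq_abort_rsp() selects the BLS response sent for the ABTS.
 *
 *	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);	-- BA_ACC, xri matched
 *	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);	-- BA_RJT, no context
 */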

void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		init_completion(&tgtp->tport_unreg_done);
		nvmet_fc_unregister_targetport(phba->targetport);
		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}
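
/*
 * Teardown handshake, in outline (this assumes the targetport delete
 * callback, defined elsewhere in this file, completes tport_unreg_done
 * once nvmet-fc has dropped its last reference):
 *
 *	init_completion(&tgtp->tport_unreg_done);
 *	nvmet_fc_unregister_targetport(phba->targetport);
 *	wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
 *
 * The short timeout (5 jiffies) bounds the wait so a stuck unregister
 * cannot hang driver teardown indefinitely.
 */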

/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It allocates a receive context for the NVME LS request, extracts
 * the LS payload from the HBQ buffer, and hands it to the nvmet-fc transport
 * via nvmet_fc_rcv_ls_req(). If the transport rejects the request, the
 * buffer is freed and an ABTS is issued for the exchange.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		if (nvmebuf)
			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;

	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
	if (nvmebuf)
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
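
/*
 * LS request lifecycle, in outline (grounded in the routine above; the
 * response side lives in lpfc_nvmet_xmt_ls_rsp/..._cmp elsewhere in this
 * file):
 *
 *	ctxp = kzalloc(..., GFP_ATOMIC);		-- per-LS context
 *	nvmet_fc_rcv_ls_req(targetport, &ctxp->ctx.ls_req, payload, size);
 *	  rc == 0  -> transport owns the request; the response completion
 *		      path frees ctxp
 *	  rc != 0  -> free the HBQ buffer and ABTS the exchange here
 */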

static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ an NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU.
	 *
	 * First we need to pick a context list to start looking at.
	 * nvmet_ctx_start_cpu points at the list that had contexts
	 * available the last time we needed to replenish this CPU;
	 * nvmet_ctx_next_cpu is just the next sequential CPU for this
	 * MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Otherwise, move on to the next CPU for this MRQ */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-flight */
	return NULL;
}
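
/*
 * Replenish walk, in outline (donor/mine are illustrative names, not
 * driver identifiers): the per-CPU lists form a ring via
 * nvmet_ctx_next_cpu, and nvmet_ctx_start_cpu caches the last donor.
 * The donated count is recorded as (donor count - 1) because one
 * ctx_buf is immediately popped for the caller:
 *
 *	list_splice_init(&donor->nvmet_ctx_list, &mine->nvmet_ctx_list);
 *	mine->nvmet_ctx_list_cnt = donor->nvmet_ctx_list_cnt - 1;
 *	donor->nvmet_ctx_list_cnt = 0;
 *	list_remove_head(&mine->nvmet_ctx_list, ctx_buf, ...);
 */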

/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: interrupt service routine timestamp, for latency stats.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It grabs a free receive context from the per-CPU list for this MRQ
 * (replenishing from another CPU if needed), fills the context in from the
 * FC header, and hands the NVME command IU to the nvmet-fc transport via
 * nvmet_fc_rcv_fcp_req(). If no context is available, the IO is queued on
 * lpfc_nvmet_io_wait_list and a fresh DMA buffer is posted to the RQ.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc, qno;
	unsigned long iflag;
	int current_cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty; it would then need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_rcv_io[id]++;
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, smp_processor_id());

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	if (ctxp->state != LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp) {
		ctxp->ts_isr_cmd = isr_timestamp;
		ctxp->ts_cmd_nvme = ktime_get_ns();
		ctxp->ts_nvme_data = 0;
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	} else {
		ctxp->ts_cmd_nvme = 0;
	}
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
	 * the NVME command / FC header is stored, so we are free to repost
	 * the buffer.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);

	/* Process FCP command */
	if (rc == 0) {
		ctxp->rqb_buffer = NULL;
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba,
				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		/* defer reposting rcv buffer till .defer_rcv callback */
		ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}
	ctxp->rqb_buffer = nvmebuf;

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
dropit:
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	if (oxid) {
		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	if (ctx_buf)
		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);

	if (nvmebuf)
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
}
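
/*
 * Return-code handling above, in outline:
 *
 *	rc == 0		  -> transport accepted the command; the RQ buffer
 *			     is reposted immediately (lpfc_rq_buf_free).
 *	rc == -EOVERFLOW  -> transport is busy; the buffer repost is
 *			     deferred until the .defer_rcv callback fires.
 *	any other rc	  -> drop: defer the context release, ABTS the
 *			     exchange, and repost the buffer.
 */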

/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the driver iocb carrying the received nvme LS buffer.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: interrupt service routine timestamp, for latency stats.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
				    isr_timestamp);
}

/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine allocates a lpfc-WQE data structure from the driver lpfc-WQE
 * free-list and prepares it as an XMIT_SEQUENCE64 WQE carrying the NVME LS
 * response. It fills in the Buffer Descriptor Entry (BDE) with the DMA
 * address and size of the response payload passed in by the caller. The
 * reference count on the ndlp is incremented by 1 and the reference to the
 * ndlp is put into context1 of the WQE data structure for this WQE to hold
 * the ndlp reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 3 */

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
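
/*
 * Typical use (sketch, based on how the LS response path in this file is
 * expected to drive it; the exact caller and its completion hook live
 * elsewhere in this file, so treat the names below as assumptions):
 *
 *	nvmewqe = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
 *					 rsp->rsplen);
 *	if (!nvmewqe)
 *		return -ENXIO;
 *	nvmewqe->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;	-- assumed hook
 *	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqe);
 */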

static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	uint32_t *txrdy;
	dma_addr_t physaddr;
	int i, cnt;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 NVMET prep FCP wqe: link err: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
		bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
		       FCP_COMMAND_TSEND);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
			    (rsp->rsplen == 12)) {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
			} else {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;

	case NVMET_FCOP_WRITEDATA:
		/* Words 0 - 2 : First SGE is the TXRDY payload */
		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
				       GFP_KERNEL, &physaddr);
		if (!txrdy) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6041 Bad txrdy buffer: oxid x%x\n",
					ctxp->oxid);
			return NULL;
		}
		ctxp->txrdy = txrdy;
		ctxp->txrdy_phys = physaddr;
		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
		wqe->fcp_treceive.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_treceive.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 5 */

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
		       CMD_FCP_TRECEIVE64_WQE);

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
		       FCP_COMMAND_TRECEIVE);
		bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 12 */
		wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;

		/* Setup 1 TXRDY and 1 SKIP SGE */
		txrdy[0] = 0;
		txrdy[1] = cpu_to_be32(rsp->transfer_length);
		txrdy[2] = 0;

		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 4 */
		wqe->fcp_trsp.rsvd_4_5[0] = 0;

		/* Word 5 */

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
		       FCP_COMMAND_TRSP);
		bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);

		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
			/* Good response - all zero's on wire */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
		} else {
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}
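
/*
 * Mapping of nvmet-fc ops to SLI-4 WQE commands in the switch above
 * (summary of the code, for orientation):
 *
 *	NVMET_FCOP_READDATA[_RSP] -> CMD_FCP_TSEND64_WQE   (+AR/IRSP for _RSP)
 *	NVMET_FCOP_WRITEDATA	  -> CMD_FCP_TRECEIVE64_WQE (TXRDY SGE first)
 *	NVMET_FCOP_RSP		  -> CMD_FCP_TRSP64_WQE	    (rspbuf, no SG list)
 */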

/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already completed */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
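
/*
 * Contrast with lpfc_nvmet_sol_fcp_abort_cmp() above: the solicited path
 * allocated its abort iocbq with lpfc_sli_get_iocbq() and so releases it
 * at completion, whereas this unsolicited path reuses the context
 * buffer's embedded iocbq (ctxp->ctxbuf->iocbq) and releases nothing.
 */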

/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for LS cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}
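
/*
 * Note: kfree(ctxp) is correct here because LS receive contexts are
 * kzalloc'ed per request in lpfc_nvmet_unsol_ls_buffer(), unlike FCP
 * contexts, which come from the pre-allocated ctx_buf pools and are
 * recycled via lpfc_nvmet_ctxbuf_post().
 */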

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
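
/*
 * Two abort flavors are built in this file (summary for orientation):
 * lpfc_nvmet_unsol_issue_abort() above sends an explicit BLS ABTS frame
 * on the wire via an XMIT_SEQUENCE64 WQE (FC_RCTL_BA_ABTS/FC_TYPE_BLS),
 * while lpfc_nvmet_sol_fcp_issue_abort() below asks the adapter to abort
 * a driver-owned exchange with a CMD_ABORT_XRI_CX WQE keyed by the
 * sli4_xritag of the WQE being aborted.
 */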
2687
2688static int
2689lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2690 struct lpfc_nvmet_rcv_ctx *ctxp,
2691 uint32_t sid, uint16_t xri)
2692{
2693 struct lpfc_nvmet_tgtport *tgtp;
2694 struct lpfc_iocbq *abts_wqeq;
2695 union lpfc_wqe *abts_wqe;
2696 struct lpfc_nodelist *ndlp;
2697 unsigned long flags;
2698 int rc;
2699
2700 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2701 if (!ctxp->wqeq) {
James Smart6c621a22017-05-15 15:20:45 -07002702 ctxp->wqeq = ctxp->ctxbuf->iocbq;
James Smartd613b6a2017-02-12 13:52:37 -08002703 ctxp->wqeq->hba_wqidx = 0;
2704 }
2705
2706 ndlp = lpfc_findnode_did(phba->pport, sid);
2707 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2708 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2709 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2710 atomic_inc(&tgtp->xmt_abort_rsp_error);
James Smartce1b5912017-06-01 21:06:58 -07002711 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
James Smart86c67372017-04-21 16:05:04 -07002712 "6160 Drop ABORT - wrong NDLP state x%x.\n",
James Smartb5ccc7d2017-03-04 09:30:39 -08002713 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
James Smartd613b6a2017-02-12 13:52:37 -08002714
2715 /* No failure to an ABTS request. */
James Smart86c67372017-04-21 16:05:04 -07002716 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
James Smartd613b6a2017-02-12 13:52:37 -08002717 return 0;
2718 }
2719
2720 /* Issue ABTS for this WQE based on iotag */
2721 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2722 if (!ctxp->abort_wqeq) {
James Smart547077a2017-05-15 15:20:40 -07002723 atomic_inc(&tgtp->xmt_abort_rsp_error);
James Smartce1b5912017-06-01 21:06:58 -07002724 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
James Smart86c67372017-04-21 16:05:04 -07002725 "6161 ABORT failed: No wqeqs: "
James Smartd613b6a2017-02-12 13:52:37 -08002726 "xri: x%x\n", ctxp->oxid);
2727 /* No failure to an ABTS request. */
James Smart86c67372017-04-21 16:05:04 -07002728 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
James Smartd613b6a2017-02-12 13:52:37 -08002729 return 0;
2730 }
2731 abts_wqeq = ctxp->abort_wqeq;
2732 abts_wqe = &abts_wqeq->wqe;
2733 ctxp->state = LPFC_NVMET_STE_ABORT;
2734
2735 /* Announce entry to new IO submit field. */
James Smart86c67372017-04-21 16:05:04 -07002736 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2737 "6162 ABORT Request to rport DID x%06x "
James Smartd613b6a2017-02-12 13:52:37 -08002738 "for xri x%x x%x\n",
2739 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2740
	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings re-established.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* Driver queued commands are in the process of being flushed. */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* An outstanding abort is already in progress on this exchange. */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Ready - mark the exchange as aborted by the driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused. Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

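	/* The fields below appear to select the victim exchange by its
	 * XRI tag (T_XRI_TAG / CMD_ABORT_XRI_CX) rather than by request
	 * tag.
	 */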
	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

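	/* Only a rejected issue_wqe is surfaced to the caller; every
	 * earlier exit returned 0 because an ABTS request itself is not
	 * failed back up the stack.
	 */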
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

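/* Issue an abort for an unsolicited FCP exchange. Unlike the solicited
 * path above, the ABTS WQE is built by lpfc_nvmet_unsol_issue_abort()
 * on the iocbq embedded in the receive context buffer.
 */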
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

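	/* A context already back on the free list must not be aborted. */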
	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
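	/* Note the inverted convention here: judging from the check below,
	 * lpfc_nvmet_unsol_issue_abort() returns zero when it failed to
	 * build the ABTS WQE.
	 */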
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	return 1;
}

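/* Issue an abort for an unsolicited LS (link service) exchange. On the
 * failure paths the receive context is freed here; on success it is
 * presumably released by the lpfc_nvmet_xmt_ls_abort_cmp() completion.
 */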
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	unsigned long flags;
	int rc;

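	/* Track the LS context state machine: an abort normally follows
	 * either LS receive (entry_cnt 1) or LS response (entry_cnt 2).
	 * A mismatch is logged, but the abort still proceeds.
	 */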
	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* Do not fail the ABTS request back to the caller. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
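	/* LS aborts are posted to the ELS ring; the FCP abort paths above
	 * use LPFC_FCP_RING.
	 */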
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}