/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

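/* Arm the per-command timeout; io_req->timeout_work runs qedf_cmd_timeout(). */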
void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        unsigned int timer_msec)
{
        queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
            msecs_to_jiffies(timer_msec));
}

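/*
 * Delayed-work handler for io_req->timeout_work.  Handles commands whose
 * timer has popped: ABTS, ELS and sequence-cleanup requests.
 */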
static void qedf_cmd_timeout(struct work_struct *work)
{
        struct qedf_ioreq *io_req =
            container_of(work, struct qedf_ioreq, timeout_work.work);
        struct qedf_ctx *qedf;
        struct qedf_rport *fcport;
        u8 op = 0;

        if (io_req == NULL) {
                QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
                return;
        }

        fcport = io_req->fcport;
        if (io_req->fcport == NULL) {
                QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
                return;
        }

        qedf = fcport->qedf;

        switch (io_req->cmd_type) {
        case QEDF_ABTS:
                if (qedf == NULL) {
                        QEDF_INFO(NULL, QEDF_LOG_IO,
                                  "qedf is NULL for ABTS xid=0x%x.\n",
                                  io_req->xid);
                        return;
                }

                QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
                    io_req->xid);
                /* Cleanup timed out ABTS */
                qedf_initiate_cleanup(io_req, true);
                complete(&io_req->abts_done);

                /*
                 * Need to call kref_put for reference taken when initiate_abts
                 * was called since abts_compl won't be called now that we've
                 * cleaned up the task.
                 */
                kref_put(&io_req->refcount, qedf_release_cmd);

                /* Clear in abort bit now that we're done with the command */
                clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

                /*
                 * Now that the original I/O and the ABTS are complete see
                 * if we need to reconnect to the target.
                 */
                qedf_restart_rport(fcport);
                break;
        case QEDF_ELS:
                if (!qedf) {
                        QEDF_INFO(NULL, QEDF_LOG_IO,
                                  "qedf is NULL for ELS xid=0x%x.\n",
                                  io_req->xid);
                        return;
                }
                /* ELS request no longer outstanding since it timed out */
                clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

                kref_get(&io_req->refcount);
                /*
                 * Don't attempt to clean an ELS timeout as any subsequent
                 * ABTS or cleanup requests just hang.  For now just free
                 * the resources of the original I/O and the RRQ
                 */
                QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
                          io_req->xid);
                io_req->event = QEDF_IOREQ_EV_ELS_TMO;
                /* Call callback function to complete command */
                if (io_req->cb_func && io_req->cb_arg) {
                        op = io_req->cb_arg->op;
                        io_req->cb_func(io_req->cb_arg);
                        io_req->cb_arg = NULL;
                }
                qedf_initiate_cleanup(io_req, true);
                kref_put(&io_req->refcount, qedf_release_cmd);
                break;
        case QEDF_SEQ_CLEANUP:
                QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
                    "xid=0x%x.\n", io_req->xid);
                qedf_initiate_cleanup(io_req, true);
                io_req->event = QEDF_IOREQ_EV_ELS_TMO;
                qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
                break;
        default:
                break;
        }
}

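/*
 * Tear down the command manager: free the BD tables and io_bdt pool, then
 * each command's sense buffer and firmware task parameters.
 */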
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
        struct io_bdt *bdt_info;
        struct qedf_ctx *qedf = cmgr->qedf;
        size_t bd_tbl_sz;
        u16 min_xid = 0;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
        int num_ios;
        int i;
        struct qedf_ioreq *io_req;

        num_ios = max_xid - min_xid + 1;

        /* Free fcoe_bdt_ctx structures */
        if (!cmgr->io_bdt_pool)
                goto free_cmd_pool;

        bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                if (bdt_info->bd_tbl) {
                        dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
                            bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
                        bdt_info->bd_tbl = NULL;
                }
        }

        /* Destroy io_bdt pool */
        for (i = 0; i < num_ios; i++) {
                kfree(cmgr->io_bdt_pool[i]);
                cmgr->io_bdt_pool[i] = NULL;
        }

        kfree(cmgr->io_bdt_pool);
        cmgr->io_bdt_pool = NULL;

free_cmd_pool:

        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
                kfree(io_req->sgl_task_params);
                kfree(io_req->task_params);
                /* Make sure we free per command sense buffer */
                if (io_req->sense_buffer)
                        dma_free_coherent(&qedf->pdev->dev,
                            QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
                            io_req->sense_buffer_dma);
                cancel_delayed_work_sync(&io_req->rrq_work);
        }

        /* Free command manager itself */
        vfree(cmgr);
}

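/* Delayed-work handler for io_req->rrq_work; sends an RRQ for the exchange. */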
static void qedf_handle_rrq(struct work_struct *work)
{
        struct qedf_ioreq *io_req =
            container_of(work, struct qedf_ioreq, rrq_work.work);

        atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
        qedf_send_rrq(io_req);
}

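/*
 * Allocate the command manager: one qedf_ioreq per task id (xid), each with
 * a DMA-coherent sense buffer, firmware task parameters and a BD table.
 */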
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
        struct qedf_cmd_mgr *cmgr;
        struct io_bdt *bdt_info;
        struct qedf_ioreq *io_req;
        u16 xid;
        int i;
        int num_ios;
        u16 min_xid = 0;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

        /* Make sure num_queues is already set before calling this function */
        if (!qedf->num_queues) {
                QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
                return NULL;
        }

        if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
                QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
                    "max_xid 0x%x.\n", min_xid, max_xid);
                return NULL;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
            "0x%x.\n", min_xid, max_xid);

        num_ios = max_xid - min_xid + 1;

        cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
        if (!cmgr) {
                QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
                return NULL;
        }

        cmgr->qedf = qedf;
        spin_lock_init(&cmgr->lock);

        /*
         * Initialize I/O request fields.
         */
        xid = 0;

        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
                INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

                io_req->xid = xid++;

                INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

                /* Allocate DMA memory to hold sense buffer */
                io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
                    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
                    GFP_KERNEL);
                if (!io_req->sense_buffer)
                        goto mem_err;

                /* Allocate task parameters to pass to f/w init functions */
                io_req->task_params = kzalloc(sizeof(*io_req->task_params),
                                              GFP_KERNEL);
                if (!io_req->task_params) {
                        QEDF_ERR(&(qedf->dbg_ctx),
                                 "Failed to allocate task_params for xid=0x%x\n",
                                 i);
                        goto mem_err;
                }

                /*
                 * Allocate scatter/gather list info to pass to f/w init
                 * functions.
                 */
                io_req->sgl_task_params = kzalloc(
                    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
                if (!io_req->sgl_task_params) {
                        QEDF_ERR(&(qedf->dbg_ctx),
                                 "Failed to allocate sgl_task_params for xid=0x%x\n",
                                 i);
                        goto mem_err;
                }
        }

        /* Allocate pool of io_bdts - one for each qedf_ioreq */
        cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
            GFP_KERNEL);

        if (!cmgr->io_bdt_pool) {
                QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
                goto mem_err;
        }

        for (i = 0; i < num_ios; i++) {
                cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
                    GFP_KERNEL);
                if (!cmgr->io_bdt_pool[i]) {
                        QEDF_WARN(&(qedf->dbg_ctx),
                                  "Failed to alloc io_bdt_pool[%d].\n", i);
                        goto mem_err;
                }
        }

        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
                    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
                    &bdt_info->bd_tbl_dma, GFP_KERNEL);
                if (!bdt_info->bd_tbl) {
                        QEDF_WARN(&(qedf->dbg_ctx),
                                  "Failed to alloc bdt_tbl[%d].\n", i);
                        goto mem_err;
                }
        }
        atomic_set(&cmgr->free_list_cnt, num_ios);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
            "cmgr->free_list_cnt=%d.\n",
            atomic_read(&cmgr->free_list_cnt));

        return cmgr;

mem_err:
        qedf_cmd_mgr_free(cmgr);
        return NULL;
}

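/*
 * Allocate a free io_req for an fcport using a round-robin scan of the
 * command pool, subject to per-connection and global task limits.
 */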
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
        struct qedf_ctx *qedf = fcport->qedf;
        struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
        struct qedf_ioreq *io_req = NULL;
        struct io_bdt *bd_tbl;
        u16 xid;
        uint32_t free_sqes;
        int i;
        unsigned long flags;

        free_sqes = atomic_read(&fcport->free_sqes);

        if (!free_sqes) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, free_sqes=%d.\n",
                    free_sqes);
                goto out_failed;
        }

        /* Limit the number of outstanding R/W tasks */
        if ((atomic_read(&fcport->num_active_ios) >=
            NUM_RW_TASKS_PER_CONNECTION)) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, num_active_ios=%d.\n",
                    atomic_read(&fcport->num_active_ios));
                goto out_failed;
        }

        /* Limit global TIDs reserved for certain tasks */
        if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, free_list_cnt=%d.\n",
                    atomic_read(&cmd_mgr->free_list_cnt));
                goto out_failed;
        }

        spin_lock_irqsave(&cmd_mgr->lock, flags);
        for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
                io_req = &cmd_mgr->cmds[cmd_mgr->idx];
                cmd_mgr->idx++;
                if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
                        cmd_mgr->idx = 0;

                /* Check to make sure command was previously freed */
                if (!io_req->alloc)
                        break;
        }

        if (i == FCOE_PARAMS_NUM_TASKS) {
                spin_unlock_irqrestore(&cmd_mgr->lock, flags);
                goto out_failed;
        }

        if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
                QEDF_ERR(&qedf->dbg_ctx,
                         "io_req found to be dirty ox_id = 0x%x.\n",
                         io_req->xid);

        /* Clear any flags now that we've reallocated the xid */
        io_req->flags = 0;
        io_req->alloc = 1;
        spin_unlock_irqrestore(&cmd_mgr->lock, flags);

        atomic_inc(&fcport->num_active_ios);
        atomic_dec(&fcport->free_sqes);
        xid = io_req->xid;
        atomic_dec(&cmd_mgr->free_list_cnt);

        io_req->cmd_mgr = cmd_mgr;
        io_req->fcport = fcport;

        /* Clear any stale sc_cmd back pointer */
        io_req->sc_cmd = NULL;
        io_req->lun = -1;

        /* Hold the io_req against deletion */
        kref_init(&io_req->refcount);   /* ID: 001 */
        atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);

        /* Bind io_bdt for this io_req */
        /* Have a static link between io_req and io_bdt_pool */
        bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
        if (bd_tbl == NULL) {
                QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
                kref_put(&io_req->refcount, qedf_release_cmd);
                goto out_failed;
        }
        bd_tbl->io_req = io_req;
        io_req->cmd_type = cmd_type;
        io_req->tm_flags = 0;

        /* Reset sequence offset data */
        io_req->rx_buf_off = 0;
        io_req->tx_buf_off = 0;
        io_req->rx_id = 0xffff; /* No OX_ID */

        return io_req;

out_failed:
        /* Record failure for stats and return NULL to caller */
        qedf->alloc_failures++;
        return NULL;
}

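/* Free the DMA resources of a middle path (ELS/TMF) request. */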
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        uint64_t sz = sizeof(struct scsi_sge);

        /* clear tm flags */
        if (mp_req->mp_req_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
                mp_req->mp_req_bd = NULL;
        }
        if (mp_req->mp_resp_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
                mp_req->mp_resp_bd = NULL;
        }
        if (mp_req->req_buf) {
                dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
                    mp_req->req_buf, mp_req->req_buf_dma);
                mp_req->req_buf = NULL;
        }
        if (mp_req->resp_buf) {
                dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
                    mp_req->resp_buf, mp_req->resp_buf_dma);
                mp_req->resp_buf = NULL;
        }
}

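/*
 * kref release callback: return the io_req to the command manager pool and
 * clear its per-command state under cmd_mgr->lock.
 */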
void qedf_release_cmd(struct kref *ref)
{
        struct qedf_ioreq *io_req =
            container_of(ref, struct qedf_ioreq, refcount);
        struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
        struct qedf_rport *fcport = io_req->fcport;
        unsigned long flags;

        if (io_req->cmd_type == QEDF_SCSI_CMD)
                WARN_ON(io_req->sc_cmd);

        if (io_req->cmd_type == QEDF_ELS ||
            io_req->cmd_type == QEDF_TASK_MGMT_CMD)
                qedf_free_mp_resc(io_req);

        atomic_inc(&cmd_mgr->free_list_cnt);
        atomic_dec(&fcport->num_active_ios);
        atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
        if (atomic_read(&fcport->num_active_ios) < 0)
                QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

        /* Increment task retry identifier now that the request is released */
        io_req->task_retry_identifier++;
        io_req->fcport = NULL;

        clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
        io_req->cpu = 0;
        spin_lock_irqsave(&cmd_mgr->lock, flags);
        io_req->fcport = NULL;
        io_req->alloc = 0;
        spin_unlock_irqrestore(&cmd_mgr->lock, flags);
}

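/*
 * DMA map the scatter/gather list and convert it into the firmware BD
 * table, classifying the request as fast or slow SGE.  Returns the number
 * of BDs used.
 */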
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;
        struct Scsi_Host *host = sc->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int sg_count = 0;
        int bd_count = 0;
        u32 sg_len;
        u64 addr, end_addr;
        int i = 0;

        sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
            scsi_sg_count(sc), sc->sc_data_direction);
        sg = scsi_sglist(sc);

        io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;

        if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
                io_req->sge_type = QEDF_IOREQ_FAST_SGE;

        scsi_for_each_sg(sc, sg, sg_count, i) {
                sg_len = (u32)sg_dma_len(sg);
                addr = (u64)sg_dma_address(sg);
                end_addr = (u64)(addr + sg_len);

                /*
                 * Intermediate s/g element, so check if it spans a whole
                 * page.  Only required for writes and only if the number of
                 * scatter/gather elements is 8 or more.
                 */
                if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
                    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
                        io_req->sge_type = QEDF_IOREQ_SLOW_SGE;

                bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
                bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
                bd[bd_count].sge_len = cpu_to_le32(sg_len);

                bd_count++;
                byte_count += sg_len;
        }

        /* If neither FAST nor SLOW was determined above, default to FAST */
        if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
                io_req->sge_type = QEDF_IOREQ_FAST_SGE;

        if (byte_count != scsi_bufflen(sc))
                QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
                    "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
                    scsi_bufflen(sc), io_req->xid);

        return bd_count;
}

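/* Populate the BD table for a command, handling the zero-length case. */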
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int bd_count;

        if (scsi_sg_count(sc)) {
                bd_count = qedf_map_sg(io_req);
                if (bd_count == 0)
                        return -ENOMEM;
        } else {
                bd_count = 0;
                bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
                bd[0].sge_len = 0;
        }
        io_req->bd_tbl->bd_valid = bd_count;

        return 0;
}

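/* Build the 32-byte FCP_CMND IU (LUN, task flags, CDB and data length). */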
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
        struct fcp_cmnd *fcp_cmnd)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

        /* fcp_cmnd is 32 bytes */
        memset(fcp_cmnd, 0, FCP_CMND_LEN);

        /* 8 bytes: SCSI LUN info */
        int_to_scsilun(sc_cmd->device->lun,
            (struct scsi_lun *)&fcp_cmnd->fc_lun);

        /* 4 bytes: flag info */
        fcp_cmnd->fc_pri_ta = 0;
        fcp_cmnd->fc_tm_flags = io_req->tm_flags;
        fcp_cmnd->fc_flags = io_req->io_req_flags;
        fcp_cmnd->fc_cmdref = 0;

        /* Populate data direction */
        if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
        } else {
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
                        fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
                else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
                        fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
        }

        fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

        /* 16 bytes: CDB information */
        if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
                memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

        /* 4 bytes: FCP data length */
        fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

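/*
 * Initialize the firmware task context and SQE for a SCSI read/write
 * command before it is posted to the send queue.
 */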
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
        struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
        struct fcoe_wqe *sqe)
{
        enum fcoe_task_type task_type;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct io_bdt *bd_tbl = io_req->bd_tbl;
        u8 fcp_cmnd[32];
        u32 tmp_fcp_cmnd[8];
        int bd_count = 0;
        struct qedf_ctx *qedf = fcport->qedf;
        uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
        struct regpair sense_data_buffer_phys_addr;
        u32 tx_io_size = 0;
        u32 rx_io_size = 0;
        int i, cnt;

        /* Note init_initiator_rw_fcoe_task memsets the task context */
        io_req->task = task_ctx;
        memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
        memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

        /* Set task type based on DMA direction of command */
        if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                task_type = FCOE_TASK_TYPE_READ_INITIATOR;
        } else {
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
                        tx_io_size = io_req->data_xfer_len;
                } else {
                        task_type = FCOE_TASK_TYPE_READ_INITIATOR;
                        rx_io_size = io_req->data_xfer_len;
                }
        }

        /* Setup the fields for fcoe_task_params */
        io_req->task_params->context = task_ctx;
        io_req->task_params->sqe = sqe;
        io_req->task_params->task_type = task_type;
        io_req->task_params->tx_io_size = tx_io_size;
        io_req->task_params->rx_io_size = rx_io_size;
        io_req->task_params->conn_cid = fcport->fw_cid;
        io_req->task_params->itid = io_req->xid;
        io_req->task_params->cq_rss_number = cq_idx;
        io_req->task_params->is_tape_device = fcport->dev_type;

        /* Fill in information for scatter/gather list */
        if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
                bd_count = bd_tbl->bd_valid;
                io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
                io_req->sgl_task_params->sgl_phys_addr.lo =
                        U64_LO(bd_tbl->bd_tbl_dma);
                io_req->sgl_task_params->sgl_phys_addr.hi =
                        U64_HI(bd_tbl->bd_tbl_dma);
                io_req->sgl_task_params->num_sges = bd_count;
                io_req->sgl_task_params->total_buffer_size =
                    scsi_bufflen(io_req->sc_cmd);
                if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
                        io_req->sgl_task_params->small_mid_sge = 1;
                else
                        io_req->sgl_task_params->small_mid_sge = 0;
        }

        /* Fill in physical address of sense buffer */
        sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
        sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

        /* fill FCP_CMND IU */
        qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

        /* Swap fcp_cmnd since FC is big endian */
        cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
        for (i = 0; i < cnt; i++) {
                tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
        }
        memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

        init_initiator_rw_fcoe_task(io_req->task_params,
                                    io_req->sgl_task_params,
                                    sense_data_buffer_phys_addr,
                                    io_req->task_retry_identifier, fcp_cmnd);

        /* Increment SGL type counters */
        if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
                qedf->slow_sge_ios++;
        else
                qedf->fast_sge_ios++;
}

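/*
 * Initialize the firmware task context for a middle path request (used for
 * ELS and task management commands).
 */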
void qedf_init_mp_task(struct qedf_ioreq *io_req,
        struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_rport *fcport = io_req->fcport;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        struct fc_frame_header *fc_hdr;
        struct fcoe_tx_mid_path_params task_fc_hdr;
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                  "Initializing MP task for cmd_type=%d\n",
                  io_req->cmd_type);

        qedf->control_requests++;

        memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

        /* Setup the task from io_req for easy reference */
        io_req->task = task_ctx;

        /* Setup the fields for fcoe_task_params */
        io_req->task_params->context = task_ctx;
        io_req->task_params->sqe = sqe;
        io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
        io_req->task_params->tx_io_size = io_req->data_xfer_len;
        /* rx_io_size tells the f/w how large a response buffer we have */
        io_req->task_params->rx_io_size = PAGE_SIZE;
        io_req->task_params->conn_cid = fcport->fw_cid;
        io_req->task_params->itid = io_req->xid;
        /* Return middle path commands on CQ 0 */
        io_req->task_params->cq_rss_number = 0;
        io_req->task_params->is_tape_device = fcport->dev_type;

        fc_hdr = &(mp_req->req_fc_hdr);
        /* Set OX_ID and RX_ID based on driver task id */
        fc_hdr->fh_ox_id = io_req->xid;
        fc_hdr->fh_rx_id = htons(0xffff);

        /* Set up FC header information */
        task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
        task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
        task_fc_hdr.type = fc_hdr->fh_type;
        task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
        task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
        task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
        task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

        /* Set up s/g list parameters for request buffer */
        tx_sgl_task_params.sgl = mp_req->mp_req_bd;
        tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
        tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
        tx_sgl_task_params.num_sges = 1;
        /* Set PAGE_SIZE for now since sg element is that size ??? */
        tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
        tx_sgl_task_params.small_mid_sge = 0;

        /* Set up s/g list parameters for response buffer */
        rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
        rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
        rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
        rx_sgl_task_params.num_sges = 1;
        /* Set PAGE_SIZE for now since sg element is that size ??? */
        rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
        rx_sgl_task_params.small_mid_sge = 0;

        /*
         * Last arg is 0 as previous code did not set that we wanted the
         * fc header information.
         */
        init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
                                                     &task_fc_hdr,
                                                     &tx_sgl_task_params,
                                                     &rx_sgl_task_params, 0);
}

/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
        uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
        u16 rval;

        rval = fcport->sq_prod_idx;

        /* Adjust ring index */
        fcport->sq_prod_idx++;
        fcport->fw_sq_prod_idx++;
        if (fcport->sq_prod_idx == total_sqe)
                fcport->sq_prod_idx = 0;

        return rval;
}

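/*
 * Notify the firmware of new SQEs by writing the SQ producer index to the
 * connection's doorbell address.
 */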
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
        struct fcoe_db_data dbell = { 0 };

        dbell.agg_flags = 0;

        dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
        dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
        dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
            FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

        dbell.sq_prod = fcport->fw_sq_prod_idx;
        /* wmb makes sure that the BDs data is updated before updating the
         * producer, otherwise FW may read old data from the BDs.
         */
        wmb();
        barrier();
        writel(*(u32 *)&dbell, fcport->p_doorbell);
        /*
         * Fence required to flush the write combined buffer, since another
         * CPU may write to the same doorbell address and data may be lost
         * due to relaxed order nature of write combined bar.
         */
        wmb();
}

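/* Record a request or response in the circular I/O trace buffer. */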
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
        int8_t direction)
{
        struct qedf_ctx *qedf = fcport->qedf;
        struct qedf_io_log *io_log;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        unsigned long flags;
        uint8_t op;

        spin_lock_irqsave(&qedf->io_trace_lock, flags);

        io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
        io_log->direction = direction;
        io_log->task_id = io_req->xid;
        io_log->port_id = fcport->rdata->ids.port_id;
        io_log->lun = sc_cmd->device->lun;
        io_log->op = op = sc_cmd->cmnd[0];
        io_log->lba[0] = sc_cmd->cmnd[2];
        io_log->lba[1] = sc_cmd->cmnd[3];
        io_log->lba[2] = sc_cmd->cmnd[4];
        io_log->lba[3] = sc_cmd->cmnd[5];
        io_log->bufflen = scsi_bufflen(sc_cmd);
        io_log->sg_count = scsi_sg_count(sc_cmd);
        io_log->result = sc_cmd->result;
        io_log->jiffies = jiffies;
        io_log->refcount = kref_read(&io_req->refcount);

        if (direction == QEDF_IO_TRACE_REQ) {
                /* For requests we only care about the submission CPU */
                io_log->req_cpu = io_req->cpu;
                io_log->int_cpu = 0;
                io_log->rsp_cpu = 0;
        } else if (direction == QEDF_IO_TRACE_RSP) {
                io_log->req_cpu = io_req->cpu;
                io_log->int_cpu = io_req->int_cpu;
                io_log->rsp_cpu = smp_processor_id();
        }

        io_log->sge_type = io_req->sge_type;

        qedf->io_trace_idx++;
        if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
                qedf->io_trace_idx = 0;

        spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}

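/*
 * Post a SCSI command to the firmware: build the BD list, initialize the
 * task context and SQE, then ring the doorbell.  Called with
 * fcport->rport_lock held.
 */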
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct Scsi_Host *host = sc_cmd->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct e4_fcoe_task_context *task_ctx;
        u16 xid;
        enum fcoe_task_type req_type = 0;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        /* Initialize rest of io_req fields */
        io_req->data_xfer_len = scsi_bufflen(sc_cmd);
        sc_cmd->SCp.ptr = (char *)io_req;
        io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */

        /* Record which cpu this request is associated with */
        io_req->cpu = smp_processor_id();

        if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
                req_type = FCOE_TASK_TYPE_READ_INITIATOR;
                io_req->io_req_flags = QEDF_READ;
                qedf->input_requests++;
        } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
                io_req->io_req_flags = QEDF_WRITE;
                qedf->output_requests++;
        } else {
                io_req->io_req_flags = 0;
                qedf->control_requests++;
        }

        xid = io_req->xid;

        /* Build buffer descriptor list for firmware from sg list */
        if (qedf_build_bd_list_from_sg(io_req)) {
                QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
                /* Release cmd will release io_req, but sc_cmd is assigned */
                io_req->sc_cmd = NULL;
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EAGAIN;
        }

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
            test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
                /* Release cmd will release io_req, but sc_cmd is assigned */
                io_req->sc_cmd = NULL;
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }

        /* Record LUN number for later use if we need it */
        io_req->lun = (int)sc_cmd->device->lun;

        /* Obtain free SQE */
        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        /* Get the task context */
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        if (!task_ctx) {
                QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
                    xid);
                /* Release cmd will release io_req, but sc_cmd is assigned */
                io_req->sc_cmd = NULL;
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }

        qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

        /* Ring doorbell */
        qedf_ring_doorbell(fcport);

        /* Set that command is with the firmware now */
        set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

        if (qedf_io_tracing && io_req->sc_cmd)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

        return 0;
}

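/* SCSI host template .queuecommand entry point. */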
int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct qedf_rport *fcport;
        struct qedf_ioreq *io_req;
        int rc = 0;
        int rval;
        unsigned long flags = 0;
        int num_sgs = 0;

        num_sgs = scsi_sg_count(sc_cmd);
        if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Number of SG elements %d exceeds the hardware limitation of %d.\n",
                         num_sgs, QEDF_MAX_BDS_PER_CMD);
                sc_cmd->result = DID_ERROR << 16;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
            test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
                sc_cmd->result = DID_NO_CONNECT << 16;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        if (!qedf->pdev->msix_enabled) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
                    sc_cmd);
                sc_cmd->result = DID_NO_CONNECT << 16;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        rval = fc_remote_port_chkready(rport);
        if (rval) {
                sc_cmd->result = rval;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        /* Retry command if we are doing a qed drain operation */
        if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        if (lport->state != LPORT_ST_READY ||
            atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        /* rport and tgt are allocated together, so tgt should be non-NULL */
        fcport = (struct qedf_rport *)&rp[1];

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
            test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                /*
                 * Session is not offloaded yet. Let SCSI-ml retry
                 * the command.
                 */
                rc = SCSI_MLQUEUE_TARGET_BUSY;
                goto exit_qcmd;
        }

        atomic_inc(&fcport->ios_to_queue);

        if (fcport->retry_delay_timestamp) {
                if (time_after(jiffies, fcport->retry_delay_timestamp)) {
                        fcport->retry_delay_timestamp = 0;
                } else {
                        /* If retry_delay timer is active, flow off the ML */
                        rc = SCSI_MLQUEUE_TARGET_BUSY;
                        atomic_dec(&fcport->ios_to_queue);
                        goto exit_qcmd;
                }
        }

        io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!io_req) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                atomic_dec(&fcport->ios_to_queue);
                goto exit_qcmd;
        }

        io_req->sc_cmd = sc_cmd;

        /* Take fcport->rport_lock for posting to fcport send queue */
        spin_lock_irqsave(&fcport->rport_lock, flags);
        if (qedf_post_io_req(fcport, io_req)) {
                QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
                /* Return SQE to pool */
                atomic_inc(&fcport->free_sqes);
                rc = SCSI_MLQUEUE_HOST_BUSY;
        }
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
        atomic_dec(&fcport->ios_to_queue);

exit_qcmd:
        return rc;
}

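/*
 * Decode the FCP_RSP IU from a completion: residual, SCSI status, response
 * code and sense data.
 */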
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
        struct fcoe_cqe_rsp_info *fcp_rsp)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        u8 rsp_flags = fcp_rsp->rsp_flags.flags;
        int fcp_sns_len = 0;
        int fcp_rsp_len = 0;
        uint8_t *rsp_info, *sense_data;

        io_req->fcp_status = FC_GOOD;
        io_req->fcp_resid = 0;
        if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
            FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
                io_req->fcp_resid = fcp_rsp->fcp_resid;

        io_req->scsi_comp_flags = rsp_flags;
        CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
            fcp_rsp->scsi_status_code;

        if (rsp_flags &
            FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
                fcp_rsp_len = fcp_rsp->fcp_rsp_len;

        if (rsp_flags &
            FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
                fcp_sns_len = fcp_rsp->fcp_sns_len;

        io_req->fcp_rsp_len = fcp_rsp_len;
        io_req->fcp_sns_len = fcp_sns_len;
        rsp_info = sense_data = io_req->sense_buffer;

        /* fetch fcp_rsp_code */
        if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
                /* Only for task management function */
                io_req->fcp_rsp_code = rsp_info[3];
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
                /* Adjust sense-data location. */
                sense_data += fcp_rsp_len;
        }

        if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Truncating sense buffer\n");
                fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
        }

        /* The sense buffer can be NULL for TMF commands */
        if (sc_cmd->sense_buffer) {
                memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
                if (fcp_sns_len)
                        memcpy(sc_cmd->sense_buffer, sense_data,
                            fcp_sns_len);
        }
}

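/* Undo the dma_map_sg() done at submission time. */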
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;

        if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
                dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
                    scsi_sg_count(sc), sc->sc_data_direction);
                io_req->bd_tbl->bd_valid = 0;
        }
}

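/* Process a CQE for a completed SCSI command and return it to the midlayer. */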
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        u16 xid;
        struct e4_fcoe_task_context *task_ctx;
        struct scsi_cmnd *sc_cmd;
        struct fcoe_cqe_rsp_info *fcp_rsp;
        struct qedf_rport *fcport;
        int refcount;
        u16 scope, qualifier = 0;
        u8 fw_residual_flag = 0;

        if (!io_req)
                return;
        if (!cqe)
                return;

        if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
            test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
            test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
                         io_req->xid);
                return;
        }

        xid = io_req->xid;
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        sc_cmd = io_req->sc_cmd;
        fcp_rsp = &cqe->cqe_info.rsp_info;

        if (!sc_cmd) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
                return;
        }

        if (!sc_cmd->SCp.ptr) {
                QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
                    "another context.\n");
                return;
        }

        if (!sc_cmd->device) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Device for sc_cmd %p is NULL.\n", sc_cmd);
                return;
        }

        if (!sc_cmd->request) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
                    "sc_cmd=%p.\n", sc_cmd);
                return;
        }

        if (!sc_cmd->request->q) {
                QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
                    "is not valid, sc_cmd=%p.\n", sc_cmd);
                return;
        }

        fcport = io_req->fcport;

        /*
         * When flush is active, let the cmds be completed from the cleanup
         * context
         */
        if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
            (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
             sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                          "Dropping good completion xid=0x%x as fcport is flushing",
                          io_req->xid);
                return;
        }

        qedf_parse_fcp_rsp(io_req, fcp_rsp);

        qedf_unmap_sg_list(qedf, io_req);

        /* Check for FCP transport error */
        if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
                    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
                    io_req->fcp_rsp_code);
                sc_cmd->result = DID_BUS_BUSY << 16;
                goto out;
        }

        fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
            FCOE_CQE_RSP_INFO_FW_UNDERRUN);
        if (fw_residual_flag) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
                    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
                    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
                    cqe->cqe_info.rsp_info.fw_residual);

                if (io_req->cdb_status == 0)
                        sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
                else
                        sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

                /*
                 * Set resid to the whole buffer length so we won't try to
                 * reuse any previously read data.
                 */
                scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
                goto out;
        }

        switch (io_req->fcp_status) {
        case FC_GOOD:
                if (io_req->cdb_status == 0) {
                        /* Good I/O completion */
                        sc_cmd->result = DID_OK << 16;
                } else {
                        refcount = kref_read(&io_req->refcount);
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                            "%d:0:%d:%lld xid=0x%0x op=0x%02x "
                            "lba=%02x%02x%02x%02x cdb_status=%d "
                            "fcp_resid=0x%x refcount=%d.\n",
                            qedf->lport->host->host_no, sc_cmd->device->id,
                            sc_cmd->device->lun, io_req->xid,
                            sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
                            sc_cmd->cmnd[4], sc_cmd->cmnd[5],
                            io_req->cdb_status, io_req->fcp_resid,
                            refcount);
                        sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

                        if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
                            io_req->cdb_status == SAM_STAT_BUSY) {
                                /*
                                 * Check whether we need to set retry_delay at
                                 * all based on retry_delay module parameter
                                 * and the status qualifier.
                                 */

                                /* Upper 2 bits */
                                scope = fcp_rsp->retry_delay_timer & 0xC000;
                                /* Lower 14 bits */
                                qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

                                if (qedf_retry_delay &&
                                    scope > 0 && qualifier > 0 &&
                                    qualifier <= 0x3FEF) {
                                        /* Check we don't go over the max */
                                        if (qualifier > QEDF_RETRY_DELAY_MAX)
                                                qualifier =
                                                    QEDF_RETRY_DELAY_MAX;
                                        fcport->retry_delay_timestamp =
                                            jiffies + (qualifier * HZ / 10);
                                }
                                /* Record stats */
                                if (io_req->cdb_status ==
                                    SAM_STAT_TASK_SET_FULL)
                                        qedf->task_set_fulls++;
                                else
                                        qedf->busy++;
                        }
                }
                if (io_req->fcp_resid)
                        scsi_set_resid(sc_cmd, io_req->fcp_resid);
                break;
        default:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
                    io_req->fcp_status);
                break;
        }

out:
        if (qedf_io_tracing)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

        /*
         * We wait till the end of the function to clear the
         * outstanding bit in case we need to send an abort
         */
        clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

        io_req->sc_cmd = NULL;
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
        kref_put(&io_req->refcount, qedf_release_cmd);
}

/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        int result)
{
        u16 xid;
        struct scsi_cmnd *sc_cmd;
        int refcount;

        if (!io_req)
                return;

        if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                          "io_req:%p scsi_done handling already done\n",
                          io_req);
                return;
        }

        /*
         * We will be done with this command after this call so clear the
         * outstanding bit.
         */
        clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

        xid = io_req->xid;
        sc_cmd = io_req->sc_cmd;

        if (!sc_cmd) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
                return;
        }

        if (!virt_addr_valid(sc_cmd)) {
                QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
                goto bad_scsi_ptr;
        }

        if (!sc_cmd->SCp.ptr) {
                QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
                    "another context.\n");
                return;
        }

        if (!sc_cmd->device) {
                QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
                         sc_cmd);
                goto bad_scsi_ptr;
        }

        if (!virt_addr_valid(sc_cmd->device)) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
                goto bad_scsi_ptr;
        }

        if (!sc_cmd->sense_buffer) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
                         sc_cmd);
                goto bad_scsi_ptr;
        }

        if (!virt_addr_valid(sc_cmd->sense_buffer)) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
                         sc_cmd);
                goto bad_scsi_ptr;
        }

        if (!sc_cmd->scsi_done) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
                         sc_cmd);
                goto bad_scsi_ptr;
        }

        qedf_unmap_sg_list(qedf, io_req);

        sc_cmd->result = result << 16;
        refcount = kref_read(&io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
            "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
            "allowed=%d retries=%d refcount=%d.\n",
            qedf->lport->host->host_no, sc_cmd->device->id,
            sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
            sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
            sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
            refcount);

        /*
         * Set resid to the whole buffer length so we won't try to reuse any
         * previously read data
         */
        scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

        if (qedf_io_tracing)
                qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

        io_req->sc_cmd = NULL;
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
        kref_put(&io_req->refcount, qedf_release_cmd);
        return;

bad_scsi_ptr:
        /*
         * Clear the io_req->sc_cmd backpointer so we don't try to process
         * this again
         */
        io_req->sc_cmd = NULL;
        kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
}

/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        int rval, i;
        struct qedf_rport *fcport = io_req->fcport;
        u64 err_warn_bit_map;
        u8 err_warn = 0xff;

        if (!cqe)
                return;

        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
            "xid=0x%x\n", io_req->xid);
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
            "err_warn_bitmap=%08x:%08x\n",
            le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
            le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
            "rx_buff_off=%08x, rx_id=%04x\n",
            le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
            le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
            le32_to_cpu(cqe->cqe_info.err_info.rx_id));

        /* Normalize the error bitmap value to just an unsigned int */
        err_warn_bit_map = (u64)
            ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
            (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
        for (i = 0; i < 64; i++) {
                if (err_warn_bit_map & (u64)((u64)1 << i)) {
                        err_warn = i;
                        break;
                }
        }

        /* Check if REC TOV expired if this is a tape device */
        if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
                if (err_warn ==
                    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
                        QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
                        if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
                                io_req->rx_buf_off =
                                    cqe->cqe_info.err_info.rx_buf_off;
                                io_req->tx_buf_off =
                                    cqe->cqe_info.err_info.tx_buf_off;
                                io_req->rx_id = cqe->cqe_info.err_info.rx_id;
                                rval = qedf_send_rec(io_req);
                                /*
                                 * We only want to abort the io_req if we
                                 * can't queue the REC command as we want to
                                 * keep the exchange open for recovery.
                                 */
                                if (rval)
                                        goto send_abort;
                        }
                        return;
                }
        }

send_abort:
        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval)
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

1476/* Cleanup a command when we receive an error detection completion */
1477void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1478 struct qedf_ioreq *io_req)
1479{
1480 int rval;
1481
1482 if (!cqe)
1483 return;
1484
1485 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1486 "xid=0x%x\n", io_req->xid);
1487 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1488 "err_warn_bitmap=%08x:%08x\n",
1489 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1490 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1491 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1492 "rx_buff_off=%08x, rx_id=%04x\n",
1493 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1494 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1495 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1496
1497 if (qedf->stop_io_on_error) {
1498 qedf_stop_all_io(qedf);
1499 return;
1500 }
1501
1502 init_completion(&io_req->abts_done);
1503 rval = qedf_initiate_abts(io_req, true);
1504 if (rval)
1505 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1506}

static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 * els_req->cb_func.
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}

/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
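/*
 * Illustrative callers (see qedf_execute_tmf() below): a LUN reset flushes
 * only that LUN,
 *
 *	qedf_flush_active_ios(fcport, lun);
 *
 * while a target reset flushes every active I/O for the rport,
 *
 *	qedf_flush_active_ios(fcport, -1);
 */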
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;
	unsigned long flags;
	int flush_cnt = 0;
	int wait_cnt = 100;
	int refcount = 0;

	if (!fcport)
		return;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return;
	}

	qedf = fcport->qedf;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return;
	}

	/* Only wait for all commands to be queued in the Upload context */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->ios_to_queue)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Waiting for %d I/Os to be queued\n",
				  atomic_read(&fcport->ios_to_queue));
			if (wait_cnt == 0) {
				QEDF_ERR(NULL,
					 "%d I/O requests could not be queued\n",
					 atomic_read(&fcport->ios_to_queue));
			}
			msleep(20);
			wait_cnt--;
		}
	}

	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
		  atomic_read(&fcport->num_active_ios), fcport,
		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");

	mutex_lock(&qedf->flush_mutex);
	if (lun == -1) {
		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	} else {
		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
		fcport->lun_reset_lun = lun;
	}

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req)
			continue;
		if (!io_req->fcport)
			continue;

		spin_lock_irqsave(&cmd_mgr->lock, flags);

		if (io_req->alloc) {
			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
				if (io_req->cmd_type == QEDF_SCSI_CMD)
					QEDF_ERR(&qedf->dbg_ctx,
						 "Allocated but not queued, xid=0x%x\n",
						 io_req->xid);
			}
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		} else {
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
			continue;
		}

		if (io_req->fcport != fcport)
			continue;

		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
		 * but RRQ is still pending.
		 * Workaround: Within qedf_send_rrq, we check if the fcport is
		 * NULL, and we drop the ref on the io_req to clean it up.
		 */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
				  io_req->xid, io_req->cmd_type, refcount);
			/* If RRQ work has been queued, try to cancel it and
			 * free the io_req
			 */
			if (atomic_read(&io_req->state) ==
			    QEDFC_CMD_ST_RRQ_WAIT) {
				if (cancel_delayed_work_sync
				    (&io_req->rrq_work)) {
					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
						  "Putting reference for pending RRQ work xid=0x%x.\n",
						  io_req->xid);
					/* ID: 003 */
					kref_put(&io_req->refcount,
						 qedf_release_cmd);
				}
			}
			continue;
		}

		/* Only consider flushing ELS during target reset */
		if (io_req->cmd_type == QEDF_ELS &&
		    lun == -1) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			flush_cnt++;
			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
			goto free_cmd;
		}

		if (io_req->cmd_type == QEDF_ABTS) {
			/* ID: 004 */
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			if (lun != -1 && io_req->lun != lun)
				goto free_cmd;

			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Flushing abort xid=0x%x.\n", io_req->xid);

			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
					  io_req->xid);
				kref_put(&io_req->refcount, qedf_release_cmd);
			}

			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
					  "Putting ref for cancelled tmo work xid=0x%x.\n",
					  io_req->xid);
				qedf_initiate_cleanup(io_req, true);
				/* Notify eh_abort handler that ABTS is
				 * complete
				 */
				complete(&io_req->abts_done);
				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
				/* ID: 002 */
				kref_put(&io_req->refcount, qedf_release_cmd);
			}
			flush_cnt++;
			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (!io_req->sc_cmd->device) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Device backpointer NULL for sc_cmd=%p.\n",
				  io_req->sc_cmd);
			/* Put reference for non-existent scsi_cmnd */
			io_req->sc_cmd = NULL;
			qedf_initiate_cleanup(io_req, false);
			kref_put(&io_req->refcount, qedf_release_cmd);
			continue;
		}
		if (lun > -1) {
			if (io_req->lun != lun)
				continue;
		}

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
			continue;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);
		flush_cnt++;

		/* Cleanup task and return I/O mid-layer */
		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 004 */
	}

	wait_cnt = 60;
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flushed 0x%x I/Os, active=0x%x.\n",
		  flush_cnt, atomic_read(&fcport->num_active_ios));
	/* Only wait for all commands to complete in the Upload context */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->num_active_ios)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
				  flush_cnt,
				  atomic_read(&fcport->num_active_ios),
				  wait_cnt);
			if (wait_cnt == 0) {
				QEDF_ERR(&qedf->dbg_ctx,
					 "Flushed %d I/Os, active=%d.\n",
					 flush_cnt,
					 atomic_read(&fcport->num_active_ios));
				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
					io_req = &cmd_mgr->cmds[i];
					if (io_req->fcport &&
					    io_req->fcport == fcport) {
						refcount =
						kref_read(&io_req->refcount);
						set_bit(QEDF_CMD_DIRTY,
							&io_req->flags);
						QEDF_ERR(&qedf->dbg_ctx,
							 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
							 io_req, io_req->xid,
							 io_req->flags,
							 io_req->sc_cmd,
							 refcount,
							 io_req->cmd_type);
					}
				}
				WARN_ON(1);
				break;
			}
			msleep(500);
			wait_cnt--;
		}
	}

	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
	mutex_unlock(&qedf->flush_mutex);
}

/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
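/*
 * Illustrative call, as used by the warning and error-detect paths above:
 *
 *	init_completion(&io_req->abts_done);
 *	if (qedf_initiate_abts(io_req, true))
 *		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
 */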
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u16 xid;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		rc = 1;
		goto out;
	}

	qedf = fcport->qedf;
	rdata = fcport->rdata;

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
		rc = 1;
		goto out;
	}

	r_a_tov = rdata->r_a_tov;
	lport = qedf->lport;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup or abort processing or already "
			  "completed.\n", io_req->xid);
		rc = 1;
		goto drop_rdata_kref;
	}

	kref_get(&io_req->refcount);

	xid = io_req->xid;
	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
		  "ABTS io_req xid = 0x%x refcount=%d\n",
		  xid, refcount);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
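	/*
	 * The sequence above (reserve an SQ slot under rport_lock, zero and
	 * fill the WQE, then ring the doorbell) is the common submission
	 * pattern; it is repeated for cleanup and TMF requests below.
	 */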

drop_rdata_kref:
	kref_put(&rdata->kref, fc_rport_destroy);
out:
	return rc;
}

void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;
	uint16_t xid;
	int rc;
	struct qedf_rport *fcport = io_req->fcport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	xid = io_req->xid;
	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	/* This was added at a point when we were scheduling abts_compl &
	 * cleanup_compl on different CPUs and there was a possibility of
	 * the io_req being freed from the other context before we got here.
	 */
	if (!fcport) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping ABTS completion xid=0x%x as fcport is NULL",
			  io_req->xid);
		return;
	}

	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping ABTS completion xid=0x%x as fcport is flushing",
			  io_req->xid);
		return;
	}

	if (!cancel_delayed_work(&io_req->timeout_work)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Wasn't able to cancel abts timeout work.\n");
	}

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		rc = kref_get_unless_zero(&io_req->refcount);	/* ID: 003 */
		if (!rc) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
				  io_req->xid);
			return;
		}
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get the RRQ response.
		 */
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
		break;
	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}
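/*
 * Note on the ID: 003 reference taken in the BA_ACC case above: it keeps the
 * io_req alive across the R_A_TOV delay and is dropped either by the RRQ
 * path (qedf_send_rrq(), assumed to put it when the work runs) or by
 * qedf_flush_active_ios() when it cancels pending RRQ work.
 */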

int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
			  "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;

	return 0;
}
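/*
 * Resulting layout (sketch): one DMA page per direction, each described by a
 * single SGE, so the firmware sees a one-entry BD table for both the request
 * and the response buffer:
 *
 *	req_buf  (QEDF_PAGE_SIZE) <- mp_req_bd->sge_addr / sge_len
 *	resp_buf (QEDF_PAGE_SIZE) <- mp_resp_bd->sge_addr / sge_len
 */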

/*
 * Last-ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}

/*
 * Returns SUCCESS if the cleanup task does not time out, otherwise returns
 * FAILED.
 */
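/*
 * Illustrative use, as in qedf_flush_active_ios() above:
 *
 *	qedf_initiate_cleanup(io_req, true);
 *
 * i.e. clean up the firmware task and, if requested, return the original
 * SCSI command to the midlayer with DID_ERROR.
 */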
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct e4_fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		rc = 1;
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup processing or already completed.\n",
			  io_req->xid);
		return SUCCESS;
	}
	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		/* Need to make sure we clear the flag since it was set */
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return FAILED;
	}

	if (io_req->cmd_type == QEDF_CLEANUP) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
			 io_req->xid, io_req->cmd_type);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return SUCCESS;
	}

	refcount = kref_read(&io_req->refcount);

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d\n",
		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
		  refcount);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->cleanup_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
			  "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	/* If it is a task management command, handle it here; the
	 * reference will be dropped in qedf_execute_tmf().
	 */
	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
		complete(&io_req->tm_done);
	}

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}

void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
		   io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->cleanup_done);
}

static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct e4_fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	int lun = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	lun = (int)sc_cmd->device->lun;
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		goto no_flush;
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		rc = -EAGAIN;
		goto no_flush;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf->lun_resets++;
	else if (tm_flags == FCP_TMF_TGT_RESET)
		qedf->target_resets++;

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = false;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
		   "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
		/* Clear outstanding bit since command timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}
	/*
	 * Double check that fcport has not gone into an uploading state before
	 * executing the command flush for the LUN/target.
	 */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcport is uploading, not executing flush.\n");
		goto no_flush;
	}
	/* We do not need this io_req any more */
	kref_put(&io_req->refcount, qedf_release_cmd);

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, lun);
	else
		qedf_flush_active_ios(fcport, -1);

no_flush:
	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
	return rc;
}

int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	int rc = SUCCESS;
	int rval;
	struct qedf_ioreq *io_req = NULL;
	int ref_cnt = 0;
	struct fc_rport_priv *rdata = fcport->rdata;

	QEDF_ERR(NULL,
		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
		 tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
		 (int)sc_cmd->device->lun);

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(NULL, "stale rport\n");
		return FAILED;
	}

	QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
		 "LUN RESET");

	if (sc_cmd->SCp.ptr) {
		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
		ref_cnt = kref_read(&io_req->refcount);
		QEDF_ERR(NULL,
			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
			 io_req, io_req->xid, ref_cnt);
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		goto tmf_err;

	if (!fcport) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		rc = FAILED;
		goto tmf_err;
	}

	lport = qedf->lport;

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
		rc = SUCCESS;
		goto tmf_err;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		if (!fcport->rdata)
			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
				 fcport);
		else
			QEDF_ERR(&qedf->dbg_ctx,
				 "fcport %p port_id=%06x is uploading.\n",
				 fcport, fcport->rdata->ids.port_id);
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	kref_put(&rdata->kref, fc_rport_destroy);
	return rc;
}
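/*
 * Note: qedf_initiate_tmf() is the task-management entry point (assumed to
 * be wired to the SCSI error-handler callbacks elsewhere in the driver).
 * tm_flags is either FCP_TMF_LUN_RESET or FCP_TMF_TGT_RESET; the actual
 * firmware work is done by qedf_execute_tmf() above.
 */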

void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}

void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;
	struct scsi_bd *p_bd_info;

	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
		  le32_to_cpu(p_bd_info->address.hi),
		  le32_to_cpu(p_bd_info->address.lo),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
		  qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;
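	/*
	 * Post the new producer index to both doorbells; the readw() after
	 * each writew() presumably flushes the posted PCI write (tmp is
	 * intentionally unused).
	 */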
2561
2562 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2563 tmp = readw(qedf->bdq_primary_prod);
2564 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2565 tmp = readw(qedf->bdq_secondary_prod);
2566
2567 spin_unlock_irqrestore(&qedf->hba_lock, flags);
2568}