/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

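/* (Re)arm the delayed-work timeout for an io_req, in milliseconds. */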
void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}

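/*
 * Delayed-work handler invoked when an ABTS, ELS or sequence cleanup
 * request does not complete within its timeout.
 */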
static void qedf_cmd_timeout(struct work_struct *work)
{

	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	u8 op = 0;

	if (io_req == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
		return;
	}

	fcport = io_req->fcport;
	if (io_req->fcport == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
		return;
	}

	qedf = fcport->qedf;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		if (qedf == NULL) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ABTS xid=0x%x.\n",
				  io_req->xid);
			return;
		}

		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/* Clear the in-abort bit now that we're done with the command */
		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		if (!qedf) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ELS xid=0x%x.\n",
				  io_req->xid);
			return;
		}
		/* ELS request no longer outstanding since it timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
			  io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		break;
	}
}

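/*
 * Free everything owned by the command manager: per-command BD tables,
 * the io_bdt pool, task parameter structs, sense buffers and finally the
 * manager itself.
 */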
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}

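/* Delayed-work handler that issues an RRQ for this io_req. */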
static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	qedf_send_rrq(io_req);

}

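/*
 * Allocate the command manager: one qedf_ioreq per task (xid), each with
 * a DMA sense buffer, firmware task/SGL parameter structs and an io_bdt
 * BD table.
 */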
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 xid;
	int i;
	int num_ios;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
			   "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
		   "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize I/O request fields.
	 */
	xid = 0;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer)
			goto mem_err;

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	    GFP_KERNEL);

	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		    GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "cmgr->free_list_cnt=%d.\n",
	    atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}

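/*
 * Allocate a free qedf_ioreq from the command manager for the given fcport,
 * honoring the per-connection and global reserved-task limits.
 */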
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n ",
		    free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit global TIDs reserved for certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!io_req->alloc)
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	/* Clear any flags now that we've reallocated the xid */
	io_req->flags = 0;
	io_req->alloc = 1;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Clear any stale sc_cmd back pointer */
	io_req->sc_cmd = NULL;
	io_req->lun = -1;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No OX_ID */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;
	return NULL;
}

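/* Free the DMA buffers and BDs used by a middle-path (ELS/TMF) request. */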
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	/* clear tm flags */
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

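/*
 * kref release callback: return the io_req to the command manager free list
 * and drop the per-fcport active I/O accounting.
 */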
void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;
	unsigned long flags;

	if (io_req->cmd_type == QEDF_SCSI_CMD)
		WARN_ON(io_req->sc_cmd);

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;
	io_req->fcport = NULL;

	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
	io_req->cpu = 0;
	spin_lock_irqsave(&cmd_mgr->lock, flags);
	io_req->fcport = NULL;
	io_req->alloc = 0;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
}

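/*
 * DMA-map the SCSI scatter/gather list and translate it into firmware BD
 * entries, classifying the request as a fast or slow SGE type.
 */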
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	u32 sg_len;
	u64 addr, end_addr;
	int i = 0;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);
	sg = scsi_sglist(sc);

	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;

	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = (u32)sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * Intermediate s/g element so check if start and end address
		 * is page aligned.  Only required for writes and only if the
		 * number of scatter/gather elements is 8 or more.
		 */
		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;

		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
		bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
		bd[bd_count].sge_len = cpu_to_le32(sg_len);

		bd_count++;
		byte_count += sg_len;
	}

	/* If neither FAST nor SLOW was set above, default to FAST */
	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
			  scsi_bufflen(sc), io_req->xid);

	return bd_count;
}

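/* Build the firmware BD list for a command, handling the no-data case. */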
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

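/* Fill out the 32-byte FCP_CMND IU from the SCSI command and task flags. */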
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
			(struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
			U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
			U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
			io_req->sgl_task_params->small_mid_sge = 1;
		else
			io_req->sgl_task_params->small_mid_sge = 0;
	}

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
		qedf->slow_sge_ios++;
	else
		qedf->fast_sge_ios++;
}

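/*
 * Initialize the firmware task context for a middle-path request (ELS/TMF),
 * including the FC header and the request/response SGLs.
 */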
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Initializing MP task for cmd_type=%d\n",
		  io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;


	/*
	 * Last arg is 0 as previous code did not set that we wanted the
	 * fc header information.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);
}

/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}

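/* Write the doorbell to notify the firmware of new SQEs for this fcport. */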
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.agg_flags = 0;

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/*
	 * Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
	wmb();
}

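/* Record a request or response entry in the driver's I/O trace buffer. */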
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
	int8_t direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = 0;
		io_log->rsp_cpu = 0;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}

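/*
 * Build the BD list and firmware task context for a SCSI command and post it
 * to the fcport's send queue.  Called with fcport->rport_lock held.
 */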
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct e4_fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Record LUN number for later use if we need it */
	io_req->lun = (int)sc_cmd->device->lun;

	/* Obtain free SQE */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
			   xid);
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	/* Set that command is with the firmware now */
	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return false;
}

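/* SCSI midlayer queuecommand entry point: validate state and post the I/O. */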
int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;
	int num_sgs = 0;

	num_sgs = scsi_sg_count(sc_cmd);
	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Number of SG elements %d exceeds the hardware limitation of %d.\n",
			 num_sgs, QEDF_MAX_BDS_PER_CMD);
		sc_cmd->result = DID_ERROR << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (!qedf->pdev->msix_enabled) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
		    sc_cmd);
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	atomic_inc(&fcport->ios_to_queue);

	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			atomic_dec(&fcport->ios_to_queue);
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		atomic_dec(&fcport->ios_to_queue);
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
	atomic_dec(&fcport->ios_to_queue);

exit_qcmd:
	return rc;
}

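/* Parse the FCP_RSP IU from a completion CQE into the io_req and sc_cmd. */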
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
	struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	/* The sense buffer can be NULL for TMF commands */
	if (sc_cmd->sense_buffer) {
		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, sense_data,
			    fcp_sns_len);
	}
}

static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

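/*
 * Handle a SCSI command completion CQE: parse the FCP response, handle
 * transport errors and firmware-detected underruns, then complete the
 * command back to the SCSI midlayer.
 */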
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid, rval;
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;

	if (!io_req)
		return;
	if (!cqe)
		return;

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
			 io_req->xid);
		return;
	}

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		   "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping good completion xid=0x%x as fcport is flushing",
			  io_req->xid);
		return;
	}

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
		    cqe->cqe_info.rsp_info.fw_residual);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/* Abort the command since we did not get all the data */
		init_completion(&io_req->abts_done);
		rval = qedf_initiate_abts(io_req, true);
		if (rval) {
			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		}

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously transferred data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */

				/* Upper 2 bits */
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				/* Lower 14 bits */
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}
				/* Record stats */
				if (io_req->cdb_status ==
				    SAM_STAT_TASK_SET_FULL)
					qedf->task_set_fulls++;
				else
					qedf->busy++;
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
			   io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	/*
	 * We wait till the end of the function to clear the
	 * outstanding bit in case we need to send an abort
	 */
	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	u16 xid;
	struct scsi_cmnd *sc_cmd;
	int refcount;

	if (!io_req)
		return;

	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "io_req:%p scsi_done handling already done\n",
			  io_req);
		return;
	}

	/*
	 * We will be done with this command after this call so clear the
	 * outstanding bit.
	 */
	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	xid = io_req->xid;
	sc_cmd = io_req->sc_cmd;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!virt_addr_valid(sc_cmd)) {
		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->device)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->sense_buffer) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->scsi_done) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
	return;

bad_scsi_ptr:
	/*
	 * Clear the io_req->sc_cmd backpointer so we don't try to process
	 * this again
	 */
	io_req->sc_cmd = NULL;
	kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 001 */
}

/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to a single 64-bit value */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

1507static void qedf_flush_els_req(struct qedf_ctx *qedf,
1508 struct qedf_ioreq *els_req)
1509{
1510 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1511 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
Dupuis, Chad1afca6b2017-02-23 07:01:03 -08001512 kref_read(&els_req->refcount));
Dupuis, Chad61d86582017-02-15 06:28:23 -08001513
1514 /*
1515 * Need to distinguish this from a timeout when calling the
1516 * els_req->cb_func.
1517 */
1518 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1519
1520 /* Cancel the timer */
1521 cancel_delayed_work_sync(&els_req->timeout_work);
1522
1523 /* Call callback function to complete command */
1524 if (els_req->cb_func && els_req->cb_arg) {
1525 els_req->cb_func(els_req->cb_arg);
1526 els_req->cb_arg = NULL;
1527 }
1528
1529 /* Release kref for original initiate_els */
1530 kref_put(&els_req->refcount, qedf_release_cmd);
1531}
1532
1533/* A value of -1 for lun is a wild card that means flush all
1534 * active SCSI I/Os for the target.
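 * qedf_execute_tmf() uses this: a LUN reset flushes only that LUN, while a
 * target reset passes -1 to flush every command for the rport.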
1535 */
1536void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1537{
1538 struct qedf_ioreq *io_req;
1539 struct qedf_ctx *qedf;
1540 struct qedf_cmd_mgr *cmd_mgr;
1541 int i, rc;
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001542 unsigned long flags;
1543 int flush_cnt = 0;
1544 int wait_cnt = 100;
1545 int refcount = 0;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001546
1547 if (!fcport)
1548 return;
1549
Chad Dupuis766639c2018-04-25 06:08:49 -07001550 /* Check that fcport is still offloaded */
1551 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1552 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1553 return;
1554 }
1555
Dupuis, Chad61d86582017-02-15 06:28:23 -08001556 qedf = fcport->qedf;
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001557
1558 if (!qedf) {
1559 QEDF_ERR(NULL, "qedf is NULL.\n");
1560 return;
1561 }
1562
1563 /* Only wait for all commands to be queued in the Upload context */
1564 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1565 (lun == -1)) {
1566 while (atomic_read(&fcport->ios_to_queue)) {
1567 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1568 "Waiting for %d I/Os to be queued\n",
1569 atomic_read(&fcport->ios_to_queue));
1570 if (wait_cnt == 0) {
1571 QEDF_ERR(NULL,
1572 "%d IOs request could not be queued\n",
1573 atomic_read(&fcport->ios_to_queue));
1574 }
1575 msleep(20);
1576 wait_cnt--;
1577 }
1578 }
1579
Dupuis, Chad61d86582017-02-15 06:28:23 -08001580 cmd_mgr = qedf->cmd_mgr;
1581
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001582 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1583 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1584 atomic_read(&fcport->num_active_ios), fcport,
1585 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1586 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1587
1588 mutex_lock(&qedf->flush_mutex);
1589 if (lun == -1) {
1590 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1591 } else {
1592 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1593 fcport->lun_reset_lun = lun;
1594 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08001595
1596 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1597 io_req = &cmd_mgr->cmds[i];
1598
1599 if (!io_req)
1600 continue;
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001601 if (!io_req->fcport)
1602 continue;
1603
1604 spin_lock_irqsave(&cmd_mgr->lock, flags);
1605
1606 if (io_req->alloc) {
1607 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1608 if (io_req->cmd_type == QEDF_SCSI_CMD)
1609 QEDF_ERR(&qedf->dbg_ctx,
1610 "Allocated but not queued, xid=0x%x\n",
1611 io_req->xid);
1612 }
1613 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1614 } else {
1615 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1616 continue;
1617 }
1618
Dupuis, Chad61d86582017-02-15 06:28:23 -08001619 if (io_req->fcport != fcport)
1620 continue;
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001621
1622 /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1623 * but RRQ is still pending.
1624 * Workaround: Within qedf_send_rrq, we check if the fcport is
1625 * NULL, and we drop the ref on the io_req to clean it up.
1626 */
1627 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1628 refcount = kref_read(&io_req->refcount);
1629 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1630 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1631 io_req->xid, io_req->cmd_type, refcount);
1632 continue;
1633 }
1634
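		/*
		 * The command is still outstanding at this point; handle it
		 * by type below: ELS first, then ABTS, then anything that
		 * still has a SCSI command attached.
		 */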
1635 /* Only consider flushing ELS during target reset */
1636 if (io_req->cmd_type == QEDF_ELS &&
1637 lun == -1) {
Dupuis, Chad61d86582017-02-15 06:28:23 -08001638 rc = kref_get_unless_zero(&io_req->refcount);
1639 if (!rc) {
1640 QEDF_ERR(&(qedf->dbg_ctx),
Chad Dupuis8025c842018-04-25 06:08:56 -07001641 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1642 io_req, io_req->xid);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001643 continue;
1644 }
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001645 flush_cnt++;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001646 qedf_flush_els_req(qedf, io_req);
1647 /*
1648 * Release the kref and go back to the top of the
1649 * loop.
1650 */
1651 goto free_cmd;
1652 }
1653
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001654 if (io_req->cmd_type == QEDF_ABTS) {
1655 rc = kref_get_unless_zero(&io_req->refcount);
1656 if (!rc) {
1657 QEDF_ERR(&(qedf->dbg_ctx),
1658 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1659 io_req, io_req->xid);
1660 continue;
1661 }
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001662 if (lun != -1 && io_req->lun != lun)
1663 goto free_cmd;
1664
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001665 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1666 "Flushing abort xid=0x%x.\n", io_req->xid);
1667
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001668 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1669 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1670 "Putting reference for pending RRQ work xid=0x%x.\n",
1671 io_req->xid);
1672 kref_put(&io_req->refcount, qedf_release_cmd);
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001673 }
1674
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001675 /* Cancel any timeout work */
1676 cancel_delayed_work_sync(&io_req->timeout_work);
1677
1678 if (!test_bit(QEDF_CMD_IN_ABORT, &io_req->flags))
1679 goto free_cmd;
1680
1681 qedf_initiate_cleanup(io_req, true);
1682 flush_cnt++;
1683
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001684 /* Notify eh_abort handler that ABTS is complete */
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001685 kref_put(&io_req->refcount, qedf_release_cmd);
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001686 complete(&io_req->abts_done);
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001687
1688 goto free_cmd;
1689 }
1690
Dupuis, Chad61d86582017-02-15 06:28:23 -08001691 if (!io_req->sc_cmd)
1692 continue;
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001693 if (!io_req->sc_cmd->device) {
1694 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1695 "Device backpointer NULL for sc_cmd=%p.\n",
1696 io_req->sc_cmd);
1697 /* Put reference for non-existent scsi_cmnd */
1698 io_req->sc_cmd = NULL;
1699 qedf_initiate_cleanup(io_req, false);
1700 kref_put(&io_req->refcount, qedf_release_cmd);
1701 continue;
1702 }
1703 if (lun > -1) {
1704 if (io_req->lun != lun)
Dupuis, Chad61d86582017-02-15 06:28:23 -08001705 continue;
1706 }
1707
1708 /*
1709 * Use kref_get_unless_zero in the unlikely case the command
1710 * we're about to flush was completed in the normal SCSI path
1711 */
1712 rc = kref_get_unless_zero(&io_req->refcount);
1713 if (!rc) {
1714 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
Chad Dupuis8025c842018-04-25 06:08:56 -07001715 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001716 continue;
1717 }
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001718
Dupuis, Chad61d86582017-02-15 06:28:23 -08001719 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1720 "Cleanup xid=0x%x.\n", io_req->xid);
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001721 flush_cnt++;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001722
1723 /* Cleanup task and return I/O mid-layer */
1724 qedf_initiate_cleanup(io_req, true);
1725
1726free_cmd:
1727 kref_put(&io_req->refcount, qedf_release_cmd);
1728 }
Shyam Sundar5d5e55652019-03-26 00:38:37 -07001729
1730 wait_cnt = 60;
1731 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1732 "Flushed 0x%x I/Os, active=0x%x.\n",
1733 flush_cnt, atomic_read(&fcport->num_active_ios));
1734 /* Only wait for all commands to complete in the Upload context */
1735 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1736 (lun == -1)) {
1737 while (atomic_read(&fcport->num_active_ios)) {
1738 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1739 "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1740 flush_cnt,
1741 atomic_read(&fcport->num_active_ios),
1742 wait_cnt);
1743 if (wait_cnt == 0) {
1744 QEDF_ERR(&qedf->dbg_ctx,
1745 "Flushed %d I/Os, active=%d.\n",
1746 flush_cnt,
1747 atomic_read(&fcport->num_active_ios));
1748 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1749 io_req = &cmd_mgr->cmds[i];
1750 if (io_req->fcport &&
1751 io_req->fcport == fcport) {
1752 refcount =
1753 kref_read(&io_req->refcount);
1754 QEDF_ERR(&qedf->dbg_ctx,
1755 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1756 io_req, io_req->xid,
1757 io_req->flags,
1758 io_req->sc_cmd,
1759 refcount,
1760 io_req->cmd_type);
1761 }
1762 }
1763 WARN_ON(1);
1764 break;
1765 }
1766 msleep(500);
1767 wait_cnt--;
1768 }
1769 }
1770
1771 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1772 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1773 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1774 mutex_unlock(&qedf->flush_mutex);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001775}
1776
1777/*
1778 * Initiate an ABTS middle path command. Note that we don't have to initialize
1779 * the task context for an ABTS task.
1780 */
1781int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1782{
1783 struct fc_lport *lport;
1784 struct qedf_rport *fcport = io_req->fcport;
Chad Dupuisff34e8e2017-05-31 06:33:52 -07001785 struct fc_rport_priv *rdata;
1786 struct qedf_ctx *qedf;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001787 u16 xid;
1788 u32 r_a_tov = 0;
1789 int rc = 0;
1790 unsigned long flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001791 struct fcoe_wqe *sqe;
1792 u16 sqe_idx;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001793 int refcount = 0;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001794
Chad Dupuisff34e8e2017-05-31 06:33:52 -07001795 /* Sanity check qedf_rport before dereferencing any pointers */
Dupuis, Chad61d86582017-02-15 06:28:23 -08001796 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
Chad Dupuisff34e8e2017-05-31 06:33:52 -07001797 QEDF_ERR(NULL, "tgt not offloaded\n");
Dupuis, Chad61d86582017-02-15 06:28:23 -08001798 rc = 1;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001799 goto out;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001800 }
1801
Hannes Reinecke56efc302019-03-26 00:38:49 -07001802 qedf = fcport->qedf;
Chad Dupuisff34e8e2017-05-31 06:33:52 -07001803 rdata = fcport->rdata;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001804
1805 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1806 QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1807 rc = 1;
1808 goto out;
1809 }
1810
Chad Dupuisff34e8e2017-05-31 06:33:52 -07001811 r_a_tov = rdata->r_a_tov;
Chad Dupuisff34e8e2017-05-31 06:33:52 -07001812 lport = qedf->lport;
1813
Dupuis, Chad61d86582017-02-15 06:28:23 -08001814 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1815 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1816 rc = 1;
Hannes Reinecke56efc302019-03-26 00:38:49 -07001817 goto drop_rdata_kref;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001818 }
1819
1820 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1821 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1822 rc = 1;
Hannes Reinecke56efc302019-03-26 00:38:49 -07001823 goto drop_rdata_kref;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001824 }
1825
1826 /* Ensure room on SQ */
1827 if (!atomic_read(&fcport->free_sqes)) {
1828 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1829 rc = 1;
Hannes Reinecke56efc302019-03-26 00:38:49 -07001830 goto drop_rdata_kref;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001831 }
1832
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001833 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1834 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1835 rc = 1;
Hannes Reinecke56efc302019-03-26 00:38:49 -07001836 goto drop_rdata_kref;
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001837 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08001838
Chad Dupuisf3690a82018-04-25 06:09:03 -07001839 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1840 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1841 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1842 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1843 "cleanup or abort processing or already "
1844 "completed.\n", io_req->xid);
1845 rc = 1;
Hannes Reinecke56efc302019-03-26 00:38:49 -07001846 goto drop_rdata_kref;
Chad Dupuisf3690a82018-04-25 06:09:03 -07001847 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08001848
1849 kref_get(&io_req->refcount);
1850
1851 xid = io_req->xid;
1852 qedf->control_requests++;
1853 qedf->packet_aborts++;
1854
Dupuis, Chad61d86582017-02-15 06:28:23 -08001855 /* Set the command type to abort */
1856 io_req->cmd_type = QEDF_ABTS;
1857 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1858
1859 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001860 refcount = kref_read(&io_req->refcount);
1861 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1862 "ABTS io_req xid = 0x%x refcount=%d\n",
1863 xid, refcount);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001864
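	/*
	 * Arm the ABTS timer before posting the request so that a lost
	 * completion is eventually cleaned up by the timeout handler.
	 */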
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001865 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001866
1867 spin_lock_irqsave(&fcport->rport_lock, flags);
1868
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001869 sqe_idx = qedf_get_sqe_idx(fcport);
1870 sqe = &fcport->sq[sqe_idx];
1871 memset(sqe, 0, sizeof(struct fcoe_wqe));
1872 io_req->task_params->sqe = sqe;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001873
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001874 init_initiator_abort_fcoe_task(io_req->task_params);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001875 qedf_ring_doorbell(fcport);
1876
1877 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1878
Hannes Reinecke56efc302019-03-26 00:38:49 -07001879drop_rdata_kref:
1880 kref_put(&rdata->kref, fc_rport_destroy);
Chad Dupuis92bbccd2018-04-25 06:09:01 -07001881out:
Dupuis, Chad61d86582017-02-15 06:28:23 -08001882 return rc;
1883}
1884
1885void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1886 struct qedf_ioreq *io_req)
1887{
1888 uint32_t r_ctl;
1889 uint16_t xid;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001890 int rc;
1891 struct qedf_rport *fcport = io_req->fcport;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001892
1893 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1894 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1895
Dupuis, Chad61d86582017-02-15 06:28:23 -08001896 xid = io_req->xid;
1897 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1898
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001899 /* This was added at a point when we were scheduling abts_compl &
1900 * cleanup_compl on different CPUs and there was a possibility of
1901	 * the io_req being freed from the other context before we got here.
1902 */
1903 if (!fcport) {
1904 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1905 "Dropping ABTS completion xid=0x%x as fcport is NULL",
1906 io_req->xid);
1907 return;
1908 }
1909
1910 /*
1911 * When flush is active, let the cmds be completed from the cleanup
1912 * context
1913 */
1914 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1915 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1916 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1917 "Dropping ABTS completion xid=0x%x as fcport is flushing",
1918 io_req->xid);
1919 return;
1920 }
1921
1922 if (!cancel_delayed_work(&io_req->timeout_work)) {
1923 QEDF_ERR(&qedf->dbg_ctx,
1924 "Wasn't able to cancel abts timeout work.\n");
1925 }
1926
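	/*
	 * r_ctl tells us whether the target accepted (BA_ACC) or rejected
	 * (BA_RJT) the abort.
	 */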
Dupuis, Chad61d86582017-02-15 06:28:23 -08001927 switch (r_ctl) {
1928 case FC_RCTL_BA_ACC:
1929 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1930 "ABTS response - ACC Send RRQ after R_A_TOV\n");
1931 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07001932 rc = kref_get_unless_zero(&io_req->refcount);
1933 if (!rc) {
1934 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1935 "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
1936 io_req->xid);
1937 return;
1938 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08001939 /*
1940		 * Don't release this cmd yet. It will be released
1941		 * after we get the RRQ response
1942 */
Dupuis, Chad61d86582017-02-15 06:28:23 -08001943 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1944 msecs_to_jiffies(qedf->lport->r_a_tov));
1945 break;
1946 /* For error cases let the cleanup return the command */
1947 case FC_RCTL_BA_RJT:
1948 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1949 "ABTS response - RJT\n");
1950 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1951 break;
1952 default:
1953 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1954 break;
1955 }
1956
1957 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1958
1959 if (io_req->sc_cmd) {
1960 if (io_req->return_scsi_cmd_on_abts)
1961 qedf_scsi_done(qedf, io_req, DID_ERROR);
1962 }
1963
1964 /* Notify eh_abort handler that ABTS is complete */
1965 complete(&io_req->abts_done);
1966
1967 kref_put(&io_req->refcount, qedf_release_cmd);
1968}
1969
1970int qedf_init_mp_req(struct qedf_ioreq *io_req)
1971{
1972 struct qedf_mp_req *mp_req;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001973 struct scsi_sge *mp_req_bd;
1974 struct scsi_sge *mp_resp_bd;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001975 struct qedf_ctx *qedf = io_req->fcport->qedf;
1976 dma_addr_t addr;
1977 uint64_t sz;
1978
1979 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1980
1981 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1982 memset(mp_req, 0, sizeof(struct qedf_mp_req));
1983
1984 if (io_req->cmd_type != QEDF_ELS) {
1985 mp_req->req_len = sizeof(struct fcp_cmnd);
1986 io_req->data_xfer_len = mp_req->req_len;
1987 } else
1988 mp_req->req_len = io_req->data_xfer_len;
1989
1990 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1991 &mp_req->req_buf_dma, GFP_KERNEL);
1992 if (!mp_req->req_buf) {
1993 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1994 qedf_free_mp_resc(io_req);
1995 return -ENOMEM;
1996 }
1997
1998 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1999 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2000 if (!mp_req->resp_buf) {
2001 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2002 "buffer\n");
2003 qedf_free_mp_resc(io_req);
2004 return -ENOMEM;
2005 }
2006
2007 /* Allocate and map mp_req_bd and mp_resp_bd */
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002008 sz = sizeof(struct scsi_sge);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002009 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2010 &mp_req->mp_req_bd_dma, GFP_KERNEL);
2011 if (!mp_req->mp_req_bd) {
2012 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2013 qedf_free_mp_resc(io_req);
2014 return -ENOMEM;
2015 }
2016
2017 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2018 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2019 if (!mp_req->mp_resp_bd) {
2020 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2021 qedf_free_mp_resc(io_req);
2022 return -ENOMEM;
2023 }
2024
2025 /* Fill bd table */
2026 addr = mp_req->req_buf_dma;
2027 mp_req_bd = mp_req->mp_req_bd;
2028 mp_req_bd->sge_addr.lo = U64_LO(addr);
2029 mp_req_bd->sge_addr.hi = U64_HI(addr);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002030 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002031
2032 /*
2033 * MP buffer is either a task mgmt command or an ELS.
2034 * So the assumption is that it consumes a single bd
2035 * entry in the bd table
2036 */
2037 mp_resp_bd = mp_req->mp_resp_bd;
2038 addr = mp_req->resp_buf_dma;
2039 mp_resp_bd->sge_addr.lo = U64_LO(addr);
2040 mp_resp_bd->sge_addr.hi = U64_HI(addr);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002041 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002042
2043 return 0;
2044}
2045
2046/*
2047 * Last-ditch effort to clear the port if it's stuck. Used only after a
2048 * cleanup task times out.
2049 */
2050static void qedf_drain_request(struct qedf_ctx *qedf)
2051{
2052 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2053 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2054 return;
2055 }
2056
2057 /* Set bit to return all queuecommand requests as busy */
2058 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2059
2060 /* Call qed drain request for function. Should be synchronous */
2061 qed_ops->common->drain(qedf->cdev);
2062
2063 /* Settle time for CQEs to be returned */
2064 msleep(100);
2065
2066 /* Unplug and continue */
2067 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2068}
2069
2070/*
2071 * Returns SUCCESS if the cleanup task does not time out, otherwise returns
2072 * FAILED.
2073 */
2074int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2075 bool return_scsi_cmd_on_abts)
2076{
2077 struct qedf_rport *fcport;
2078 struct qedf_ctx *qedf;
2079 uint16_t xid;
Tomer Tayar21dd79e2017-12-27 19:30:06 +02002080 struct e4_fcoe_task_context *task;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002081 int tmo = 0;
2082 int rc = SUCCESS;
2083 unsigned long flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002084 struct fcoe_wqe *sqe;
2085 u16 sqe_idx;
Shyam Sundar5d5e55652019-03-26 00:38:37 -07002086 int refcount = 0;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002087
2088 fcport = io_req->fcport;
2089 if (!fcport) {
2090 QEDF_ERR(NULL, "fcport is NULL.\n");
2091 return SUCCESS;
2092 }
2093
Chad Dupuisff34e8e2017-05-31 06:33:52 -07002094 /* Sanity check qedf_rport before dereferencing any pointers */
2095 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2096 QEDF_ERR(NULL, "tgt not offloaded\n");
2097 rc = 1;
2098 return SUCCESS;
2099 }
2100
Dupuis, Chad61d86582017-02-15 06:28:23 -08002101 qedf = fcport->qedf;
2102 if (!qedf) {
2103 QEDF_ERR(NULL, "qedf is NULL.\n");
2104 return SUCCESS;
2105 }
2106
2107 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
Shyam Sundar5d5e55652019-03-26 00:38:37 -07002108 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
Dupuis, Chad61d86582017-02-15 06:28:23 -08002109 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2110 "cleanup processing or already completed.\n",
2111 io_req->xid);
2112 return SUCCESS;
2113 }
Chad Dupuis96b17652019-03-26 00:38:39 -07002114 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002115
2116 /* Ensure room on SQ */
2117 if (!atomic_read(&fcport->free_sqes)) {
2118 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
Chad Dupuis96b17652019-03-26 00:38:39 -07002119 /* Need to make sure we clear the flag since it was set */
2120 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002121 return FAILED;
2122 }
2123
Shyam Sundar5d5e55652019-03-26 00:38:37 -07002124 if (io_req->cmd_type == QEDF_CLEANUP) {
2125 QEDF_ERR(&qedf->dbg_ctx,
2126 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2127 io_req->xid, io_req->cmd_type);
2128 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2129 return SUCCESS;
2130 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08002131
Shyam Sundar5d5e55652019-03-26 00:38:37 -07002132 refcount = kref_read(&io_req->refcount);
2133
2134 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2135 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d\n",
2136 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2137 refcount);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002138
2139 /* Cleanup cmds re-use the same TID as the original I/O */
2140 xid = io_req->xid;
2141 io_req->cmd_type = QEDF_CLEANUP;
2142 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2143
Dupuis, Chad61d86582017-02-15 06:28:23 -08002144 task = qedf_get_task_mem(&qedf->tasks, xid);
2145
Chad Dupuis96b17652019-03-26 00:38:39 -07002146 init_completion(&io_req->cleanup_done);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002147
Dupuis, Chad61d86582017-02-15 06:28:23 -08002148 spin_lock_irqsave(&fcport->rport_lock, flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002149
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002150 sqe_idx = qedf_get_sqe_idx(fcport);
2151 sqe = &fcport->sq[sqe_idx];
2152 memset(sqe, 0, sizeof(struct fcoe_wqe));
2153 io_req->task_params->sqe = sqe;
2154
2155 init_initiator_cleanup_fcoe_task(io_req->task_params);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002156 qedf_ring_doorbell(fcport);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002157
Dupuis, Chad61d86582017-02-15 06:28:23 -08002158 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2159
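	/*
	 * Wait for the firmware response; qedf_process_cleanup_compl() will
	 * complete cleanup_done when the cleanup CQE arrives.
	 */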
Chad Dupuis96b17652019-03-26 00:38:39 -07002160 tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2161 QEDF_CLEANUP_TIMEOUT * HZ);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002162
2163 if (!tmo) {
2164 rc = FAILED;
2165 /* Timeout case */
2166 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2167 "xid=%x.\n", io_req->xid);
2168 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2169 /* Issue a drain request if cleanup task times out */
2170 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2171 qedf_drain_request(qedf);
2172 }
2173
Shyam Sundar5d5e55652019-03-26 00:38:37 -07002174	/* If it is a TASK MGMT command, handle it; the reference will be
 2175	 * decreased in qedf_execute_tmf()
2176 */
2177 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
2178 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2179 io_req->sc_cmd = NULL;
2180 complete(&io_req->tm_done);
2181 }
2182
Dupuis, Chad61d86582017-02-15 06:28:23 -08002183 if (io_req->sc_cmd) {
2184 if (io_req->return_scsi_cmd_on_abts)
2185 qedf_scsi_done(qedf, io_req, DID_ERROR);
2186 }
2187
2188 if (rc == SUCCESS)
2189 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2190 else
2191 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2192
2193 return rc;
2194}
2195
2196void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2197 struct qedf_ioreq *io_req)
2198{
2199 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
2200 io_req->xid);
2201
2202 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2203
2204 /* Complete so we can finish cleaning up the I/O */
Chad Dupuis96b17652019-03-26 00:38:39 -07002205 complete(&io_req->cleanup_done);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002206}
2207
2208static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2209 uint8_t tm_flags)
2210{
2211 struct qedf_ioreq *io_req;
Tomer Tayar21dd79e2017-12-27 19:30:06 +02002212 struct e4_fcoe_task_context *task;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002213 struct qedf_ctx *qedf = fcport->qedf;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002214 struct fc_lport *lport = qedf->lport;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002215 int rc = 0;
2216 uint16_t xid;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002217 int tmo = 0;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002218 int lun = 0;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002219 unsigned long flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002220 struct fcoe_wqe *sqe;
2221 u16 sqe_idx;
Hannes Reinecke56efc302019-03-26 00:38:49 -07002222 struct fc_rport_priv *rdata = fcport->rdata;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002223
2224 if (!sc_cmd) {
2225 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
2226 return FAILED;
2227 }
2228
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002229 lun = (int)sc_cmd->device->lun;
Chad Dupuis57a35482017-05-31 06:33:58 -07002230 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
Dupuis, Chad61d86582017-02-15 06:28:23 -08002231 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2232 rc = FAILED;
2233 return FAILED;
2234 }
2235
Hannes Reinecke56efc302019-03-26 00:38:49 -07002236 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2237 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, "stale rport\n");
2238 return FAILED;
2239 }
2240 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2241 "portid = 0x%x tm_flags = %d\n",
2242 rdata->ids.port_id, tm_flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002243
2244 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2245 if (!io_req) {
2246 QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
2247 rc = -EAGAIN;
2248 goto reset_tmf_err;
2249 }
2250
Chad Dupuis642a0b32018-05-22 00:28:43 -07002251 if (tm_flags == FCP_TMF_LUN_RESET)
2252 qedf->lun_resets++;
2253 else if (tm_flags == FCP_TMF_TGT_RESET)
2254 qedf->target_resets++;
2255
Dupuis, Chad61d86582017-02-15 06:28:23 -08002256 /* Initialize rest of io_req fields */
2257 io_req->sc_cmd = sc_cmd;
2258 io_req->fcport = fcport;
2259 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2260
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002261 /* Record which cpu this request is associated with */
Dupuis, Chad61d86582017-02-15 06:28:23 -08002262 io_req->cpu = smp_processor_id();
2263
Dupuis, Chad61d86582017-02-15 06:28:23 -08002264 /* Set TM flags */
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002265 io_req->io_req_flags = QEDF_READ;
2266 io_req->data_xfer_len = 0;
2267 io_req->tm_flags = tm_flags;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002268
2269	/* For a TMF, don't return the SCSI command to the mid-layer on error */
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002270 io_req->return_scsi_cmd_on_abts = false;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002271
Dupuis, Chad61d86582017-02-15 06:28:23 -08002272 /* Obtain exchange id */
2273 xid = io_req->xid;
2274
2275 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2276 "0x%x\n", xid);
2277
2278 /* Initialize task context for this IO request */
2279 task = qedf_get_task_mem(&qedf->tasks, xid);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002280
2281 init_completion(&io_req->tm_done);
2282
Dupuis, Chad61d86582017-02-15 06:28:23 -08002283 spin_lock_irqsave(&fcport->rport_lock, flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002284
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002285 sqe_idx = qedf_get_sqe_idx(fcport);
2286 sqe = &fcport->sq[sqe_idx];
2287 memset(sqe, 0, sizeof(struct fcoe_wqe));
2288
2289 qedf_init_task(fcport, lport, io_req, task, sqe);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002290 qedf_ring_doorbell(fcport);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002291
Dupuis, Chad61d86582017-02-15 06:28:23 -08002292 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2293
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002294 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002295 tmo = wait_for_completion_timeout(&io_req->tm_done,
2296 QEDF_TM_TIMEOUT * HZ);
2297
2298 if (!tmo) {
2299 rc = FAILED;
2300 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002301 /* Clear outstanding bit since command timed out */
2302 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2303 io_req->sc_cmd = NULL;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002304 } else {
2305 /* Check TMF response code */
2306 if (io_req->fcp_rsp_code == 0)
2307 rc = SUCCESS;
2308 else
2309 rc = FAILED;
2310 }
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002311 /*
2312 * Double check that fcport has not gone into an uploading state before
2313 * executing the command flush for the LUN/target.
2314 */
2315 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2316 QEDF_ERR(&qedf->dbg_ctx,
2317 "fcport is uploading, not executing flush.\n");
2318 goto no_flush;
2319 }
2320 /* We do not need this io_req any more */
2321 kref_put(&io_req->refcount, qedf_release_cmd);
2322
Dupuis, Chad61d86582017-02-15 06:28:23 -08002323
2324 if (tm_flags == FCP_TMF_LUN_RESET)
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002325 qedf_flush_active_ios(fcport, lun);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002326 else
2327 qedf_flush_active_ios(fcport, -1);
2328
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002329no_flush:
Dupuis, Chad61d86582017-02-15 06:28:23 -08002330 if (rc != SUCCESS) {
2331 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2332 rc = FAILED;
2333 } else {
2334 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2335 rc = SUCCESS;
2336 }
2337reset_tmf_err:
Hannes Reinecke56efc302019-03-26 00:38:49 -07002338 kref_put(&rdata->kref, fc_rport_destroy);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002339 return rc;
2340}
2341
2342int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2343{
2344 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2345 struct fc_rport_libfc_priv *rp = rport->dd_data;
2346 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2347 struct qedf_ctx *qedf;
2348 struct fc_lport *lport;
2349 int rc = SUCCESS;
2350 int rval;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002351 struct qedf_ioreq *io_req = NULL;
2352 int ref_cnt = 0;
2353 struct fc_rport_priv *rdata = fcport->rdata;
2354
2355 QEDF_ERR(NULL,
2356 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
2357 tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
2358 (int)sc_cmd->device->lun);
2359
2360 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2361 QEDF_ERR(NULL, "stale rport\n");
2362 return FAILED;
2363 }
2364
2365 QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
2366 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
2367 "LUN RESET");
2368
2369 if (sc_cmd->SCp.ptr) {
2370 io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
2371 ref_cnt = kref_read(&io_req->refcount);
2372 QEDF_ERR(NULL,
2373 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2374 io_req, io_req->xid, ref_cnt);
2375 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08002376
2377 rval = fc_remote_port_chkready(rport);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002378 if (rval) {
2379 QEDF_ERR(NULL, "device_reset rport not ready\n");
2380 rc = FAILED;
2381 goto tmf_err;
2382 }
2383
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002384 rc = fc_block_scsi_eh(sc_cmd);
2385 if (rc)
2386 return rc;
2387
2388 if (!fcport) {
Dupuis, Chad61d86582017-02-15 06:28:23 -08002389 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2390 rc = FAILED;
2391 goto tmf_err;
2392 }
2393
2394 qedf = fcport->qedf;
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002395
2396 if (!qedf) {
2397 QEDF_ERR(NULL, "qedf is NULL.\n");
2398 rc = FAILED;
2399 goto tmf_err;
2400 }
2401
Dupuis, Chad61d86582017-02-15 06:28:23 -08002402 lport = qedf->lport;
2403
2404 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2405 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2406 rc = SUCCESS;
2407 goto tmf_err;
2408 }
2409
2410 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2411 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2412 rc = FAILED;
2413 goto tmf_err;
2414 }
2415
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002416 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2417 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
2418 rc = FAILED;
2419 goto tmf_err;
2420 }
2421
Dupuis, Chad61d86582017-02-15 06:28:23 -08002422 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2423
2424tmf_err:
2425 return rc;
2426}
2427
2428void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2429 struct qedf_ioreq *io_req)
2430{
2431 struct fcoe_cqe_rsp_info *fcp_rsp;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002432
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07002433 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2434
Dupuis, Chad61d86582017-02-15 06:28:23 -08002435 fcp_rsp = &cqe->cqe_info.rsp_info;
2436 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2437
2438 io_req->sc_cmd = NULL;
2439 complete(&io_req->tm_done);
2440}
2441
2442void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2443 struct fcoe_cqe *cqe)
2444{
2445 unsigned long flags;
2446 uint16_t tmp;
2447 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2448 u32 payload_len, crc;
2449 struct fc_frame_header *fh;
2450 struct fc_frame *fp;
2451 struct qedf_io_work *io_work;
2452 u32 bdq_idx;
2453 void *bdq_addr;
Tomer Tayarda090912017-12-27 19:30:07 +02002454 struct scsi_bd *p_bd_info;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002455
Tomer Tayarda090912017-12-27 19:30:07 +02002456 p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002457 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
Tomer Tayarda090912017-12-27 19:30:07 +02002458 "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2459 le32_to_cpu(p_bd_info->address.hi),
2460 le32_to_cpu(p_bd_info->address.lo),
2461 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2462 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2463 qedf->bdq_prod_idx, pktlen);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002464
Tomer Tayarda090912017-12-27 19:30:07 +02002465 bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002466 if (bdq_idx >= QEDF_BDQ_SIZE) {
2467 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2468 bdq_idx);
2469 goto increment_prod;
2470 }
2471
2472 bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2473 if (!bdq_addr) {
2474 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2475 "unsolicited packet.\n");
2476 goto increment_prod;
2477 }
2478
2479 if (qedf_dump_frames) {
2480 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2481 "BDQ frame is at addr=%p.\n", bdq_addr);
2482 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2483 (void *)bdq_addr, pktlen, false);
2484 }
2485
2486 /* Allocate frame */
2487 payload_len = pktlen - sizeof(struct fc_frame_header);
2488 fp = fc_frame_alloc(qedf->lport, payload_len);
2489 if (!fp) {
2490 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2491 goto increment_prod;
2492 }
2493
2494 /* Copy data from BDQ buffer into fc_frame struct */
2495 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2496 memcpy(fh, (void *)bdq_addr, pktlen);
2497
2498 /* Initialize the frame so libfc sees it as a valid frame */
2499 crc = fcoe_fc_crc(fp);
2500 fc_frame_init(fp);
2501 fr_dev(fp) = qedf->lport;
2502 fr_sof(fp) = FC_SOF_I3;
2503 fr_eof(fp) = FC_EOF_T;
2504 fr_crc(fp) = cpu_to_le32(~crc);
2505
2506 /*
2507 * We need to return the frame back up to libfc in a non-atomic
2508 * context
2509 */
2510 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2511 if (!io_work) {
2512 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2513 "work for I/O completion.\n");
2514 fc_frame_free(fp);
2515 goto increment_prod;
2516 }
2517 memset(io_work, 0, sizeof(struct qedf_io_work));
2518
2519 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2520
2521 /* Copy contents of CQE for deferred processing */
2522 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2523
2524 io_work->qedf = qedf;
2525 io_work->fp = fp;
2526
2527 queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2528increment_prod:
2529 spin_lock_irqsave(&qedf->hba_lock, flags);
2530
2531 /* Increment producer to let f/w know we've handled the frame */
2532 qedf->bdq_prod_idx++;
2533
2534 /* Producer index wraps at uint16_t boundary */
2535 if (qedf->bdq_prod_idx == 0xffff)
2536 qedf->bdq_prod_idx = 0;
2537
2538 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
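	/*
	 * Update both producer doorbells; the read-back is presumably there
	 * to flush the posted writes before the lock is dropped.
	 */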
2539 tmp = readw(qedf->bdq_primary_prod);
2540 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2541 tmp = readw(qedf->bdq_secondary_prod);
2542
2543 spin_unlock_irqrestore(&qedf->hba_lock, flags);
2544}