1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2017 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#include <linux/spinlock.h>
10#include <linux/vmalloc.h>
11#include "qedf.h"
12#include <scsi/scsi_tcq.h>
13
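/*
 * Arm (or re-arm) the timeout for an outstanding I/O request; the delayed
 * work runs qedf_cmd_timeout() below after timer_msec milliseconds.
 */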
14void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
15 unsigned int timer_msec)
16{
17 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
18 msecs_to_jiffies(timer_msec));
19}
20
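/*
 * Delayed-work handler that fires when a command's timer expires. The
 * recovery action depends on the command type (ABTS, ELS or sequence
 * cleanup).
 */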
21static void qedf_cmd_timeout(struct work_struct *work)
22{
23
24 struct qedf_ioreq *io_req =
25 container_of(work, struct qedf_ioreq, timeout_work.work);
26 struct qedf_ctx *qedf = io_req->fcport->qedf;
27 struct qedf_rport *fcport = io_req->fcport;
28 u8 op = 0;
29
30 switch (io_req->cmd_type) {
31 case QEDF_ABTS:
32 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
33 io_req->xid);
34 /* Cleanup timed out ABTS */
35 qedf_initiate_cleanup(io_req, true);
36 complete(&io_req->abts_done);
37
38 /*
39 * Need to call kref_put for reference taken when initiate_abts
40 * was called since abts_compl won't be called now that we've
41 * cleaned up the task.
42 */
43 kref_put(&io_req->refcount, qedf_release_cmd);
44
45 /*
46 * Now that the original I/O and the ABTS are complete see
47 * if we need to reconnect to the target.
48 */
49 qedf_restart_rport(fcport);
50 break;
51 case QEDF_ELS:
52 kref_get(&io_req->refcount);
53 /*
54 * Don't attempt to clean an ELS timeout as any subsequent
55 * ABTS or cleanup requests just hang. For now just free
56 * the resources of the original I/O and the RRQ
57 */
58 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
59 io_req->xid);
60 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
61 /* Call callback function to complete command */
62 if (io_req->cb_func && io_req->cb_arg) {
63 op = io_req->cb_arg->op;
64 io_req->cb_func(io_req->cb_arg);
65 io_req->cb_arg = NULL;
66 }
67 qedf_initiate_cleanup(io_req, true);
68 kref_put(&io_req->refcount, qedf_release_cmd);
69 break;
70 case QEDF_SEQ_CLEANUP:
71 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
72 "xid=0x%x.\n", io_req->xid);
73 qedf_initiate_cleanup(io_req, true);
74 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
75 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
76 break;
77 default:
78 break;
79 }
80}
81
82void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
83{
84 struct io_bdt *bdt_info;
85 struct qedf_ctx *qedf = cmgr->qedf;
86 size_t bd_tbl_sz;
87 u16 min_xid = QEDF_MIN_XID;
88 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
89 int num_ios;
90 int i;
91 struct qedf_ioreq *io_req;
92
93 num_ios = max_xid - min_xid + 1;
94
95 /* Free fcoe_bdt_ctx structures */
96 if (!cmgr->io_bdt_pool)
97 goto free_cmd_pool;
98
99 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
100 for (i = 0; i < num_ios; i++) {
101 bdt_info = cmgr->io_bdt_pool[i];
102 if (bdt_info->bd_tbl) {
103 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
104 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
105 bdt_info->bd_tbl = NULL;
106 }
107 }
108
109 /* Destroy io_bdt pool */
110 for (i = 0; i < num_ios; i++) {
111 kfree(cmgr->io_bdt_pool[i]);
112 cmgr->io_bdt_pool[i] = NULL;
113 }
114
115 kfree(cmgr->io_bdt_pool);
116 cmgr->io_bdt_pool = NULL;
117
118free_cmd_pool:
119
120 for (i = 0; i < num_ios; i++) {
121 io_req = &cmgr->cmds[i];
122 kfree(io_req->sgl_task_params);
123 kfree(io_req->task_params);
124 /* Make sure we free per command sense buffer */
125 if (io_req->sense_buffer)
126 dma_free_coherent(&qedf->pdev->dev,
127 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
128 io_req->sense_buffer_dma);
129 cancel_delayed_work_sync(&io_req->rrq_work);
130 }
131
132 /* Free command manager itself */
133 vfree(cmgr);
134}
135
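/*
 * Worker that sends the RRQ ELS for an exchange; it is scheduled from the
 * ABTS completion path after the R_A_TOV delay.
 */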
136static void qedf_handle_rrq(struct work_struct *work)
137{
138 struct qedf_ioreq *io_req =
139 container_of(work, struct qedf_ioreq, rrq_work.work);
140
141 qedf_send_rrq(io_req);
142
143}
144
145struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
146{
147 struct qedf_cmd_mgr *cmgr;
148 struct io_bdt *bdt_info;
149 struct qedf_ioreq *io_req;
150 u16 xid;
151 int i;
152 int num_ios;
153 u16 min_xid = QEDF_MIN_XID;
154 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
155
156 /* Make sure num_queues is already set before calling this function */
157 if (!qedf->num_queues) {
158 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
159 return NULL;
160 }
161
162 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
163 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
164 "max_xid 0x%x.\n", min_xid, max_xid);
165 return NULL;
166 }
167
168 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
169 "0x%x.\n", min_xid, max_xid);
170
171 num_ios = max_xid - min_xid + 1;
172
173 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
174 if (!cmgr) {
175 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
176 return NULL;
177 }
178
179 cmgr->qedf = qedf;
180 spin_lock_init(&cmgr->lock);
181
182 /*
183 * Initialize I/O request fields.
184 */
185 xid = QEDF_MIN_XID;
186
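	/*
	 * For each command: set up the timeout and RRQ work items, assign a
	 * fixed XID, and allocate the DMA sense buffer plus the firmware
	 * task/SGL parameter structures.
	 */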
187 for (i = 0; i < num_ios; i++) {
188 io_req = &cmgr->cmds[i];
189 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
190
191 io_req->xid = xid++;
192
193 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
194
195 /* Allocate DMA memory to hold sense buffer */
196 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
197 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
198 GFP_KERNEL);
199 if (!io_req->sense_buffer)
200 goto mem_err;
201
202 /* Allocate task parameters to pass to f/w init functions */
203 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
204 GFP_KERNEL);
205 if (!io_req->task_params) {
206 QEDF_ERR(&(qedf->dbg_ctx),
207 "Failed to allocate task_params for xid=0x%x\n",
208 i);
209 goto mem_err;
210 }
211
212 /*
213 * Allocate scatter/gather list info to pass to f/w init
214 * functions.
215 */
216 io_req->sgl_task_params = kzalloc(
217 sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
218 if (!io_req->sgl_task_params) {
219 QEDF_ERR(&(qedf->dbg_ctx),
220 "Failed to allocate sgl_task_params for xid=0x%x\n",
221 i);
222 goto mem_err;
223 }
224 }
225
226 /* Allocate pool of io_bdts - one for each qedf_ioreq */
227 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
228 GFP_KERNEL);
229
230 if (!cmgr->io_bdt_pool) {
231 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
232 goto mem_err;
233 }
234
235 for (i = 0; i < num_ios; i++) {
236 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
237 GFP_KERNEL);
238 if (!cmgr->io_bdt_pool[i]) {
239 QEDF_WARN(&(qedf->dbg_ctx),
240 "Failed to alloc io_bdt_pool[%d].\n", i);
241 goto mem_err;
242 }
243 }
244
245 for (i = 0; i < num_ios; i++) {
246 bdt_info = cmgr->io_bdt_pool[i];
247 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
248 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
249 &bdt_info->bd_tbl_dma, GFP_KERNEL);
250 if (!bdt_info->bd_tbl) {
251 QEDF_WARN(&(qedf->dbg_ctx),
252 "Failed to alloc bdt_tbl[%d].\n", i);
253 goto mem_err;
254 }
255 }
256 atomic_set(&cmgr->free_list_cnt, num_ios);
257 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
258 "cmgr->free_list_cnt=%d.\n",
259 atomic_read(&cmgr->free_list_cnt));
260
261 return cmgr;
262
263mem_err:
264 qedf_cmd_mgr_free(cmgr);
265 return NULL;
266}
267
268struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
269{
270 struct qedf_ctx *qedf = fcport->qedf;
271 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
272 struct qedf_ioreq *io_req = NULL;
273 struct io_bdt *bd_tbl;
274 u16 xid;
275 uint32_t free_sqes;
276 int i;
277 unsigned long flags;
278
279 free_sqes = atomic_read(&fcport->free_sqes);
280
281 if (!free_sqes) {
282 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
283 "Returning NULL, free_sqes=%d.\n ",
284 free_sqes);
285 goto out_failed;
286 }
287
288 /* Limit the number of outstanding R/W tasks */
289 if ((atomic_read(&fcport->num_active_ios) >=
290 NUM_RW_TASKS_PER_CONNECTION)) {
291 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
292 "Returning NULL, num_active_ios=%d.\n",
293 atomic_read(&fcport->num_active_ios));
294 goto out_failed;
295 }
296
297 /* Keep GBL_RSVD_TASKS global TIDs in reserve */
298 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
299 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
300 "Returning NULL, free_list_cnt=%d.\n",
301 atomic_read(&cmd_mgr->free_list_cnt));
302 goto out_failed;
303 }
304
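	/*
	 * Round-robin search of the command array for an entry that is not
	 * marked outstanding; cmd_mgr->idx remembers where the previous
	 * search left off.
	 */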
305 spin_lock_irqsave(&cmd_mgr->lock, flags);
306 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
307 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
308 cmd_mgr->idx++;
309 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
310 cmd_mgr->idx = 0;
311
312 /* Check to make sure command was previously freed */
313 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
314 break;
315 }
316
317 if (i == FCOE_PARAMS_NUM_TASKS) {
318 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
319 goto out_failed;
320 }
321
322 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
323 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
324
325 atomic_inc(&fcport->num_active_ios);
326 atomic_dec(&fcport->free_sqes);
327 xid = io_req->xid;
328 atomic_dec(&cmd_mgr->free_list_cnt);
329
330 io_req->cmd_mgr = cmd_mgr;
331 io_req->fcport = fcport;
332
333 /* Hold the io_req against deletion */
334 kref_init(&io_req->refcount);
335
336 /* Bind io_bdt for this io_req */
337 /* Have a static link between io_req and io_bdt_pool */
338 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
339 if (bd_tbl == NULL) {
340 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
341 kref_put(&io_req->refcount, qedf_release_cmd);
342 goto out_failed;
343 }
344 bd_tbl->io_req = io_req;
345 io_req->cmd_type = cmd_type;
346 io_req->tm_flags = 0;
347
348 /* Reset sequence offset data */
349 io_req->rx_buf_off = 0;
350 io_req->tx_buf_off = 0;
351 io_req->rx_id = 0xffff; /* No OX_ID */
352
353 return io_req;
354
355out_failed:
356 /* Record failure for stats and return NULL to caller */
357 qedf->alloc_failures++;
358 return NULL;
359}
360
361static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
362{
363 struct qedf_mp_req *mp_req = &(io_req->mp_req);
364 struct qedf_ctx *qedf = io_req->fcport->qedf;
365 uint64_t sz = sizeof(struct scsi_sge);
366
367 /* Free any middle path request/response buffers and BDs */
368 if (mp_req->mp_req_bd) {
369 dma_free_coherent(&qedf->pdev->dev, sz,
370 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
371 mp_req->mp_req_bd = NULL;
372 }
373 if (mp_req->mp_resp_bd) {
374 dma_free_coherent(&qedf->pdev->dev, sz,
375 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
376 mp_req->mp_resp_bd = NULL;
377 }
378 if (mp_req->req_buf) {
379 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
380 mp_req->req_buf, mp_req->req_buf_dma);
381 mp_req->req_buf = NULL;
382 }
383 if (mp_req->resp_buf) {
384 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
385 mp_req->resp_buf, mp_req->resp_buf_dma);
386 mp_req->resp_buf = NULL;
387 }
388}
389
390void qedf_release_cmd(struct kref *ref)
391{
392 struct qedf_ioreq *io_req =
393 container_of(ref, struct qedf_ioreq, refcount);
394 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
395 struct qedf_rport *fcport = io_req->fcport;
396
397 if (io_req->cmd_type == QEDF_ELS ||
398 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
399 qedf_free_mp_resc(io_req);
400
401 atomic_inc(&cmd_mgr->free_list_cnt);
402 atomic_dec(&fcport->num_active_ios);
403 if (atomic_read(&fcport->num_active_ios) < 0)
404 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
405
406 /* Increment task retry identifier now that the request is released */
407 io_req->task_retry_identifier++;
408
409 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
410}
411
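/*
 * Split one DMA-mapped segment that is larger than the maximum SGE length
 * into multiple BDs of at most QEDF_BD_SPLIT_SZ bytes each; returns the
 * number of BDs written starting at bd_index.
 */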
412static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
413 int bd_index)
414{
415 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
416 int frag_size, sg_frags;
417
418 sg_frags = 0;
419 while (sg_len) {
420 if (sg_len > QEDF_BD_SPLIT_SZ)
421 frag_size = QEDF_BD_SPLIT_SZ;
422 else
423 frag_size = sg_len;
424 bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
425 bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
426 bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
427
428 addr += (u64)frag_size;
429 sg_frags++;
430 sg_len -= frag_size;
431 }
432 return sg_frags;
433}
434
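/*
 * DMA-map the SCSI command's scatterlist and convert it into the firmware
 * BD table, falling back to the slow path when elements are not page
 * aligned; returns the number of BDs used.
 */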
435static int qedf_map_sg(struct qedf_ioreq *io_req)
436{
437 struct scsi_cmnd *sc = io_req->sc_cmd;
438 struct Scsi_Host *host = sc->device->host;
439 struct fc_lport *lport = shost_priv(host);
440 struct qedf_ctx *qedf = lport_priv(lport);
441 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
442 struct scatterlist *sg;
443 int byte_count = 0;
444 int sg_count = 0;
445 int bd_count = 0;
446 int sg_frags;
447 unsigned int sg_len;
448 u64 addr, end_addr;
449 int i;
450
451 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
452 scsi_sg_count(sc), sc->sc_data_direction);
453
454 sg = scsi_sglist(sc);
455
456 /*
457 * New condition to send single SGE as cached-SGL with length less
458 * than 64k.
459 */
460 if ((sg_count == 1) && (sg_dma_len(sg) <=
461 QEDF_MAX_SGLEN_FOR_CACHESGL)) {
462 sg_len = sg_dma_len(sg);
463 addr = (u64)sg_dma_address(sg);
464
465 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
466 bd[bd_count].sge_addr.hi = (addr >> 32);
467 bd[bd_count].sge_len = (u16)sg_len;
468
469 return ++bd_count;
470 }
471
472 scsi_for_each_sg(sc, sg, sg_count, i) {
473 sg_len = sg_dma_len(sg);
474 addr = (u64)sg_dma_address(sg);
475 end_addr = (u64)(addr + sg_len);
476
477 /*
478 * First s/g element in the list so check if the end_addr
479 * is page aligned. Also check to make sure the length is
480 * at least page size.
481 */
482 if ((i == 0) && (sg_count > 1) &&
483 ((end_addr % QEDF_PAGE_SIZE) ||
484 sg_len < QEDF_PAGE_SIZE))
485 io_req->use_slowpath = true;
486 /*
487 * Last s/g element so check if the start address is page
488 * aligned.
489 */
490 else if ((i == (sg_count - 1)) && (sg_count > 1) &&
491 (addr % QEDF_PAGE_SIZE))
492 io_req->use_slowpath = true;
493 /*
494 * Intermediate s/g element so check if the start and end
495 * addresses are page aligned.
496 */
497 else if ((i != 0) && (i != (sg_count - 1)) &&
498 ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
499 io_req->use_slowpath = true;
500
501 if (sg_len > QEDF_MAX_BD_LEN) {
502 sg_frags = qedf_split_bd(io_req, addr, sg_len,
503 bd_count);
504 } else {
505 sg_frags = 1;
506 bd[bd_count].sge_addr.lo = U64_LO(addr);
507 bd[bd_count].sge_addr.hi = U64_HI(addr);
508 bd[bd_count].sge_len = (uint16_t)sg_len;
509 }
510
511 bd_count += sg_frags;
512 byte_count += sg_len;
513 }
514
515 if (byte_count != scsi_bufflen(sc))
516 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
517 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
518 scsi_bufflen(sc), io_req->xid);
519
520 return bd_count;
521}
522
523static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
524{
525 struct scsi_cmnd *sc = io_req->sc_cmd;
526 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
527 int bd_count;
528
529 if (scsi_sg_count(sc)) {
530 bd_count = qedf_map_sg(io_req);
531 if (bd_count == 0)
532 return -ENOMEM;
533 } else {
534 bd_count = 0;
535 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
536 bd[0].sge_len = 0;
537 }
538 io_req->bd_tbl->bd_valid = bd_count;
539
540 return 0;
541}
542
543static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
544 struct fcp_cmnd *fcp_cmnd)
545{
546 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
547
548 /* fcp_cmnd is 32 bytes */
549 memset(fcp_cmnd, 0, FCP_CMND_LEN);
550
551 /* 8 bytes: SCSI LUN info */
552 int_to_scsilun(sc_cmd->device->lun,
553 (struct scsi_lun *)&fcp_cmnd->fc_lun);
554
555 /* 4 bytes: flag info */
556 fcp_cmnd->fc_pri_ta = 0;
557 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
558 fcp_cmnd->fc_flags = io_req->io_req_flags;
559 fcp_cmnd->fc_cmdref = 0;
560
561 /* Populate data direction */
562 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
563 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
564 } else {
565 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
566 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
567 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
568 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
569 }
570
571 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
572
573 /* 16 bytes: CDB information */
574 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
575 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
576
577 /* 4 bytes: FCP data length */
578 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
579}
580
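/*
 * Initialize the firmware task context and SQE for a regular read/write
 * (or task management) command via init_initiator_rw_fcoe_task().
 */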
581static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
582 struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
583 struct fcoe_wqe *sqe)
584{
585 enum fcoe_task_type task_type;
586 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
587 struct io_bdt *bd_tbl = io_req->bd_tbl;
588 u8 fcp_cmnd[32];
589 u32 tmp_fcp_cmnd[8];
590 int bd_count = 0;
591 struct qedf_ctx *qedf = fcport->qedf;
592 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
593 struct regpair sense_data_buffer_phys_addr;
594 u32 tx_io_size = 0;
595 u32 rx_io_size = 0;
596 int i, cnt;
597
598 /* Note init_initiator_rw_fcoe_task memsets the task context */
599 io_req->task = task_ctx;
600 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
601 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
602 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
603
604 /* Set task type based on the DMA direction of the command */
605 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
606 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
607 } else {
608 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
609 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
610 tx_io_size = io_req->data_xfer_len;
611 } else {
612 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
613 rx_io_size = io_req->data_xfer_len;
614 }
615 }
616
617 /* Setup the fields for fcoe_task_params */
618 io_req->task_params->context = task_ctx;
619 io_req->task_params->sqe = sqe;
620 io_req->task_params->task_type = task_type;
621 io_req->task_params->tx_io_size = tx_io_size;
622 io_req->task_params->rx_io_size = rx_io_size;
623 io_req->task_params->conn_cid = fcport->fw_cid;
624 io_req->task_params->itid = io_req->xid;
625 io_req->task_params->cq_rss_number = cq_idx;
626 io_req->task_params->is_tape_device = fcport->dev_type;
627
628 /* Fill in information for scatter/gather list */
629 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
630 bd_count = bd_tbl->bd_valid;
631 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
632 io_req->sgl_task_params->sgl_phys_addr.lo =
633 U64_LO(bd_tbl->bd_tbl_dma);
634 io_req->sgl_task_params->sgl_phys_addr.hi =
635 U64_HI(bd_tbl->bd_tbl_dma);
636 io_req->sgl_task_params->num_sges = bd_count;
637 io_req->sgl_task_params->total_buffer_size =
638 scsi_bufflen(io_req->sc_cmd);
639 io_req->sgl_task_params->small_mid_sge =
640 io_req->use_slowpath;
641 }
642
643 /* Fill in physical address of sense buffer */
644 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
645 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
646
647 /* fill FCP_CMND IU */
648 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
649
650 /* Swap fcp_cmnd since FC is big endian */
651 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
652 for (i = 0; i < cnt; i++) {
653 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
654 }
655 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
656
657 init_initiator_rw_fcoe_task(io_req->task_params,
658 io_req->sgl_task_params,
659 sense_data_buffer_phys_addr,
660 io_req->task_retry_identifier, fcp_cmnd);
661
662 /* Increment SGL type counters */
663 if (bd_count == 1) {
664 qedf->single_sge_ios++;
665 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
666 } else if (io_req->use_slowpath) {
667 qedf->slow_sge_ios++;
668 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
669 } else {
670 qedf->fast_sge_ios++;
671 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
672 }
673}
674
675void qedf_init_mp_task(struct qedf_ioreq *io_req,
676 struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
677{
678 struct qedf_mp_req *mp_req = &(io_req->mp_req);
679 struct qedf_rport *fcport = io_req->fcport;
680 struct qedf_ctx *qedf = io_req->fcport->qedf;
681 struct fc_frame_header *fc_hdr;
682 struct fcoe_tx_mid_path_params task_fc_hdr;
683 struct scsi_sgl_task_params tx_sgl_task_params;
684 struct scsi_sgl_task_params rx_sgl_task_params;
685
686 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
687 "Initializing MP task for cmd_type=%d\n",
688 io_req->cmd_type);
689
690 qedf->control_requests++;
691
692 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
693 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
694 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
695 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
696
697 /* Setup the task from io_req for easy reference */
698 io_req->task = task_ctx;
699
700 /* Setup the fields for fcoe_task_params */
701 io_req->task_params->context = task_ctx;
702 io_req->task_params->sqe = sqe;
703 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
704 io_req->task_params->tx_io_size = io_req->data_xfer_len;
705 /* rx_io_size tells the f/w how large a response buffer we have */
706 io_req->task_params->rx_io_size = PAGE_SIZE;
707 io_req->task_params->conn_cid = fcport->fw_cid;
708 io_req->task_params->itid = io_req->xid;
709 /* Return middle path commands on CQ 0 */
710 io_req->task_params->cq_rss_number = 0;
711 io_req->task_params->is_tape_device = fcport->dev_type;
712
713 fc_hdr = &(mp_req->req_fc_hdr);
714 /* Set OX_ID and RX_ID based on driver task id */
715 fc_hdr->fh_ox_id = io_req->xid;
716 fc_hdr->fh_rx_id = htons(0xffff);
717
718 /* Set up FC header information */
719 task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
720 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
721 task_fc_hdr.type = fc_hdr->fh_type;
722 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
723 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
724 task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
725 task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
726
727 /* Set up s/g list parameters for request buffer */
728 tx_sgl_task_params.sgl = mp_req->mp_req_bd;
729 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
730 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
731 tx_sgl_task_params.num_sges = 1;
732 /* Set PAGE_SIZE for now since sg element is that size ??? */
733 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
734 tx_sgl_task_params.small_mid_sge = 0;
735
736 /* Set up s/g list parameters for response buffer */
737 rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
738 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
739 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
740 rx_sgl_task_params.num_sges = 1;
741 /* Set PAGE_SIZE for now since sg element is that size ??? */
742 rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
743 rx_sgl_task_params.small_mid_sge = 0;
744
745
746 /*
747 * Last arg is 0 as previous code did not set that we wanted the
748 * fc header information.
749 */
750 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
751 &task_fc_hdr,
752 &tx_sgl_task_params,
753 &rx_sgl_task_params, 0);
754
755 /* Midpath requests always consume 1 SGE */
756 qedf->single_sge_ios++;
757}
758
759/* Presumed that fcport->rport_lock is held */
760u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
761{
762 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
763 u16 rval;
764
765 rval = fcport->sq_prod_idx;
766
767 /* Adjust ring index */
768 fcport->sq_prod_idx++;
769 fcport->fw_sq_prod_idx++;
770 if (fcport->sq_prod_idx == total_sqe)
771 fcport->sq_prod_idx = 0;
772
773 return rval;
774}
775
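/*
 * Write the new SQ producer index to the connection's doorbell so the
 * firmware processes the freshly posted WQEs.
 */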
776void qedf_ring_doorbell(struct qedf_rport *fcport)
777{
778 struct fcoe_db_data dbell = { 0 };
779
780 dbell.agg_flags = 0;
781
782 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
783 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
784 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
785 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
786
787 dbell.sq_prod = fcport->fw_sq_prod_idx;
788 writel(*(u32 *)&dbell, fcport->p_doorbell);
789 /* Make sure SQ index is updated so f/w processes requests in order */
790 wmb();
791 mmiowb();
792}
793
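/*
 * Log a request or response entry into the circular I/O trace buffer;
 * callers check qedf_io_tracing before calling this.
 */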
794static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
795 int8_t direction)
796{
797 struct qedf_ctx *qedf = fcport->qedf;
798 struct qedf_io_log *io_log;
799 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
800 unsigned long flags;
801 uint8_t op;
802
803 spin_lock_irqsave(&qedf->io_trace_lock, flags);
804
805 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
806 io_log->direction = direction;
807 io_log->task_id = io_req->xid;
808 io_log->port_id = fcport->rdata->ids.port_id;
809 io_log->lun = sc_cmd->device->lun;
810 io_log->op = op = sc_cmd->cmnd[0];
811 io_log->lba[0] = sc_cmd->cmnd[2];
812 io_log->lba[1] = sc_cmd->cmnd[3];
813 io_log->lba[2] = sc_cmd->cmnd[4];
814 io_log->lba[3] = sc_cmd->cmnd[5];
815 io_log->bufflen = scsi_bufflen(sc_cmd);
816 io_log->sg_count = scsi_sg_count(sc_cmd);
817 io_log->result = sc_cmd->result;
818 io_log->jiffies = jiffies;
819 io_log->refcount = kref_read(&io_req->refcount);
820
821 if (direction == QEDF_IO_TRACE_REQ) {
822 /* For requests we only care about the submission CPU */
823 io_log->req_cpu = io_req->cpu;
824 io_log->int_cpu = 0;
825 io_log->rsp_cpu = 0;
826 } else if (direction == QEDF_IO_TRACE_RSP) {
827 io_log->req_cpu = io_req->cpu;
828 io_log->int_cpu = io_req->int_cpu;
829 io_log->rsp_cpu = smp_processor_id();
830 }
831
832 io_log->sge_type = io_req->sge_type;
833
834 qedf->io_trace_idx++;
835 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
836 qedf->io_trace_idx = 0;
837
838 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
839}
840
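/*
 * Build the BD list and firmware task context for a SCSI command and post
 * it to the fcport's send queue; the caller holds fcport->rport_lock.
 */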
841int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
842{
843 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
844 struct Scsi_Host *host = sc_cmd->device->host;
845 struct fc_lport *lport = shost_priv(host);
846 struct qedf_ctx *qedf = lport_priv(lport);
847 struct fcoe_task_context *task_ctx;
848 u16 xid;
849 enum fcoe_task_type req_type = 0;
850 struct fcoe_wqe *sqe;
851 u16 sqe_idx;
852
853 /* Initialize rest of io_req fields */
854 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
855 sc_cmd->SCp.ptr = (char *)io_req;
856 io_req->use_slowpath = false; /* Assume fast SGL by default */
857
858 /* Record which cpu this request is associated with */
859 io_req->cpu = smp_processor_id();
860
861 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
862 req_type = FCOE_TASK_TYPE_READ_INITIATOR;
863 io_req->io_req_flags = QEDF_READ;
864 qedf->input_requests++;
865 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
866 req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
867 io_req->io_req_flags = QEDF_WRITE;
868 qedf->output_requests++;
869 } else {
870 io_req->io_req_flags = 0;
871 qedf->control_requests++;
872 }
873
874 xid = io_req->xid;
875
876 /* Build buffer descriptor list for firmware from sg list */
877 if (qedf_build_bd_list_from_sg(io_req)) {
878 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
879 kref_put(&io_req->refcount, qedf_release_cmd);
880 return -EAGAIN;
881 }
882
883 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
884 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
885 kref_put(&io_req->refcount, qedf_release_cmd);
886 }
887
888 /* Obtain free SQE */
889 sqe_idx = qedf_get_sqe_idx(fcport);
890 sqe = &fcport->sq[sqe_idx];
891 memset(sqe, 0, sizeof(struct fcoe_wqe));
892
893 /* Get the task context */
894 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
895 if (!task_ctx) {
896 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
897 xid);
898 kref_put(&io_req->refcount, qedf_release_cmd);
899 return -EINVAL;
900 }
901
902 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
903
904 /* Ring doorbell */
905 qedf_ring_doorbell(fcport);
906
907 if (qedf_io_tracing && io_req->sc_cmd)
908 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
909
910 return false;
911}
912
913int
914qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
915{
916 struct fc_lport *lport = shost_priv(host);
917 struct qedf_ctx *qedf = lport_priv(lport);
918 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
919 struct fc_rport_libfc_priv *rp = rport->dd_data;
920 struct qedf_rport *fcport = rport->dd_data;
921 struct qedf_ioreq *io_req;
922 int rc = 0;
923 int rval;
924 unsigned long flags = 0;
925
926
927 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
928 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
929 sc_cmd->result = DID_NO_CONNECT << 16;
930 sc_cmd->scsi_done(sc_cmd);
931 return 0;
932 }
933
934 rval = fc_remote_port_chkready(rport);
935 if (rval) {
936 sc_cmd->result = rval;
937 sc_cmd->scsi_done(sc_cmd);
938 return 0;
939 }
940
941 /* Retry command if we are doing a qed drain operation */
942 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
943 rc = SCSI_MLQUEUE_HOST_BUSY;
944 goto exit_qcmd;
945 }
946
947 if (lport->state != LPORT_ST_READY ||
948 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
949 rc = SCSI_MLQUEUE_HOST_BUSY;
950 goto exit_qcmd;
951 }
952
953 /* rport and tgt are allocated together, so tgt should be non-NULL */
954 fcport = (struct qedf_rport *)&rp[1];
955
956 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
957 /*
958 * Session is not offloaded yet. Let SCSI-ml retry
959 * the command.
960 */
961 rc = SCSI_MLQUEUE_TARGET_BUSY;
962 goto exit_qcmd;
963 }
964 if (fcport->retry_delay_timestamp) {
965 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
966 fcport->retry_delay_timestamp = 0;
967 } else {
968 /* If retry_delay timer is active, flow off the ML */
969 rc = SCSI_MLQUEUE_TARGET_BUSY;
970 goto exit_qcmd;
971 }
972 }
973
974 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
975 if (!io_req) {
976 rc = SCSI_MLQUEUE_HOST_BUSY;
977 goto exit_qcmd;
978 }
979
980 io_req->sc_cmd = sc_cmd;
981
982 /* Take fcport->rport_lock for posting to fcport send queue */
983 spin_lock_irqsave(&fcport->rport_lock, flags);
984 if (qedf_post_io_req(fcport, io_req)) {
985 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
986 /* Return SQE to pool */
987 atomic_inc(&fcport->free_sqes);
988 rc = SCSI_MLQUEUE_HOST_BUSY;
989 }
990 spin_unlock_irqrestore(&fcport->rport_lock, flags);
991
992exit_qcmd:
993 return rc;
994}
995
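/*
 * Pull the SCSI status, residual, response code and sense data out of the
 * FCP_RSP portion of the CQE and into the io_req/scsi_cmnd.
 */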
996static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
997 struct fcoe_cqe_rsp_info *fcp_rsp)
998{
999 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1000 struct qedf_ctx *qedf = io_req->fcport->qedf;
1001 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1002 int fcp_sns_len = 0;
1003 int fcp_rsp_len = 0;
1004 uint8_t *rsp_info, *sense_data;
1005
1006 io_req->fcp_status = FC_GOOD;
1007 io_req->fcp_resid = 0;
1008 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1009 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1010 io_req->fcp_resid = fcp_rsp->fcp_resid;
1011
1012 io_req->scsi_comp_flags = rsp_flags;
1013 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1014 fcp_rsp->scsi_status_code;
1015
1016 if (rsp_flags &
1017 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1018 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1019
1020 if (rsp_flags &
1021 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1022 fcp_sns_len = fcp_rsp->fcp_sns_len;
1023
1024 io_req->fcp_rsp_len = fcp_rsp_len;
1025 io_req->fcp_sns_len = fcp_sns_len;
1026 rsp_info = sense_data = io_req->sense_buffer;
1027
1028 /* fetch fcp_rsp_code */
1029 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1030 /* Only for task management function */
1031 io_req->fcp_rsp_code = rsp_info[3];
1032 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1033 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1034 /* Adjust sense-data location. */
1035 sense_data += fcp_rsp_len;
1036 }
1037
1038 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1039 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1040 "Truncating sense buffer\n");
1041 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1042 }
1043
1044 /* The sense buffer can be NULL for TMF commands */
1045 if (sc_cmd->sense_buffer) {
1046 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1047 if (fcp_sns_len)
1048 memcpy(sc_cmd->sense_buffer, sense_data,
1049 fcp_sns_len);
1050 }
1051}
1052
1053static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1054{
1055 struct scsi_cmnd *sc = io_req->sc_cmd;
1056
1057 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1058 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1059 scsi_sg_count(sc), sc->sc_data_direction);
1060 io_req->bd_tbl->bd_valid = 0;
1061 }
1062}
1063
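/*
 * Completion handler for normal SCSI I/O CQEs: validates the command,
 * parses the FCP_RSP, handles firmware-detected underruns and busy/queue
 * full retry delays, then hands the command back to the SCSI midlayer.
 */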
1064void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1065 struct qedf_ioreq *io_req)
1066{
1067 u16 xid, rval;
1068 struct fcoe_task_context *task_ctx;
1069 struct scsi_cmnd *sc_cmd;
1070 struct fcoe_cqe_rsp_info *fcp_rsp;
1071 struct qedf_rport *fcport;
1072 int refcount;
1073 u16 scope, qualifier = 0;
1074 u8 fw_residual_flag = 0;
1075
1076 if (!io_req)
1077 return;
1078 if (!cqe)
1079 return;
1080
1081 xid = io_req->xid;
1082 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1083 sc_cmd = io_req->sc_cmd;
1084 fcp_rsp = &cqe->cqe_info.rsp_info;
1085
1086 if (!sc_cmd) {
1087 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1088 return;
1089 }
1090
1091 if (!sc_cmd->SCp.ptr) {
1092 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1093 "another context.\n");
1094 return;
1095 }
1096
1097 if (!sc_cmd->request) {
1098 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1099 "sc_cmd=%p.\n", sc_cmd);
1100 return;
1101 }
1102
1103 if (!sc_cmd->request->special) {
1104 QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
1105 "request not valid, sc_cmd=%p.\n", sc_cmd);
1106 return;
1107 }
1108
1109 if (!sc_cmd->request->q) {
1110 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1111 "is not valid, sc_cmd=%p.\n", sc_cmd);
1112 return;
1113 }
1114
1115 fcport = io_req->fcport;
1116
1117 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1118
1119 qedf_unmap_sg_list(qedf, io_req);
1120
1121 /* Check for FCP transport error */
1122 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1123 QEDF_ERR(&(qedf->dbg_ctx),
1124 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1125 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1126 io_req->fcp_rsp_code);
1127 sc_cmd->result = DID_BUS_BUSY << 16;
1128 goto out;
1129 }
1130
1131 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1132 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1133 if (fw_residual_flag) {
1134 QEDF_ERR(&(qedf->dbg_ctx),
1135 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
1136 "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
1137 fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
1138 cqe->cqe_info.rsp_info.fw_residual);
1139
1140 if (io_req->cdb_status == 0)
1141 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1142 else
1143 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1144
1145 /* Abort the command since we did not get all the data */
1146 init_completion(&io_req->abts_done);
1147 rval = qedf_initiate_abts(io_req, true);
1148 if (rval) {
1149 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1150 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1151 }
1152
1153 /*
1154 * Set resid to the whole buffer length so we won't try to reuse
1155 * any previously transferred data.
1156 */
1157 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1158 goto out;
1159 }
1160
1161 switch (io_req->fcp_status) {
1162 case FC_GOOD:
1163 if (io_req->cdb_status == 0) {
1164 /* Good I/O completion */
1165 sc_cmd->result = DID_OK << 16;
1166 } else {
1167 refcount = kref_read(&io_req->refcount);
1168 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1169 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1170 "lba=%02x%02x%02x%02x cdb_status=%d "
1171 "fcp_resid=0x%x refcount=%d.\n",
1172 qedf->lport->host->host_no, sc_cmd->device->id,
1173 sc_cmd->device->lun, io_req->xid,
1174 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1175 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1176 io_req->cdb_status, io_req->fcp_resid,
1177 refcount);
1178 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1179
1180 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1181 io_req->cdb_status == SAM_STAT_BUSY) {
1182 /*
1183 * Check whether we need to set retry_delay at
1184 * all based on retry_delay module parameter
1185 * and the status qualifier.
1186 */
1187
1188 /* Upper 2 bits */
1189 scope = fcp_rsp->retry_delay_timer & 0xC000;
1190 /* Lower 14 bits */
1191 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1192
1193 if (qedf_retry_delay &&
1194 scope > 0 && qualifier > 0 &&
1195 qualifier <= 0x3FEF) {
1196 /* Check we don't go over the max */
1197 if (qualifier > QEDF_RETRY_DELAY_MAX)
1198 qualifier =
1199 QEDF_RETRY_DELAY_MAX;
1200 fcport->retry_delay_timestamp =
1201 jiffies + (qualifier * HZ / 10);
1202 }
1203 }
1204 }
1205 if (io_req->fcp_resid)
1206 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1207 break;
1208 default:
1209 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1210 io_req->fcp_status);
1211 break;
1212 }
1213
1214out:
1215 if (qedf_io_tracing)
1216 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1217
1218 io_req->sc_cmd = NULL;
1219 sc_cmd->SCp.ptr = NULL;
1220 sc_cmd->scsi_done(sc_cmd);
1221 kref_put(&io_req->refcount, qedf_release_cmd);
1222}
1223
1224/* Return a SCSI command in some other context besides a normal completion */
1225void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1226 int result)
1227{
1228 u16 xid;
1229 struct scsi_cmnd *sc_cmd;
1230 int refcount;
1231
1232 if (!io_req)
1233 return;
1234
1235 xid = io_req->xid;
1236 sc_cmd = io_req->sc_cmd;
1237
1238 if (!sc_cmd) {
1239 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1240 return;
1241 }
1242
1243 if (!sc_cmd->SCp.ptr) {
1244 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1245 "another context.\n");
1246 return;
1247 }
1248
1249 qedf_unmap_sg_list(qedf, io_req);
1250
1251 sc_cmd->result = result << 16;
1252 refcount = kref_read(&io_req->refcount);
1253 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1254 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1255 "allowed=%d retries=%d refcount=%d.\n",
1256 qedf->lport->host->host_no, sc_cmd->device->id,
1257 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1258 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1259 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1260 refcount);
1261
1262 /*
1263 * Set resid to the whole buffer length so we won't try to reuse any
1264 * previously read data
1265 */
1266 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1267
1268 if (qedf_io_tracing)
1269 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1270
1271 io_req->sc_cmd = NULL;
1272 sc_cmd->SCp.ptr = NULL;
1273 sc_cmd->scsi_done(sc_cmd);
1274 kref_put(&io_req->refcount, qedf_release_cmd);
1275}
1276
1277/*
1278 * Handle warning type CQE completions. This is mainly used for REC timer
1279 * popping.
1280 */
1281void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1282 struct qedf_ioreq *io_req)
1283{
1284 int rval, i;
1285 struct qedf_rport *fcport = io_req->fcport;
1286 u64 err_warn_bit_map;
1287 u8 err_warn = 0xff;
1288
1289 if (!cqe)
1290 return;
1291
1292 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1293 "xid=0x%x\n", io_req->xid);
1294 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1295 "err_warn_bitmap=%08x:%08x\n",
1296 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1297 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1298 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1299 "rx_buff_off=%08x, rx_id=%04x\n",
1300 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1301 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1302 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1303
1304 /* Normalize the error bitmap value to just an unsigned int */
1305 err_warn_bit_map = (u64)
1306 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1307 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1308 for (i = 0; i < 64; i++) {
1309 if (err_warn_bit_map & (u64)((u64)1 << i)) {
1310 err_warn = i;
1311 break;
1312 }
1313 }
1314
1315 /* Check if REC TOV expired if this is a tape device */
1316 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1317 if (err_warn ==
1318 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1319 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1320 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1321 io_req->rx_buf_off =
1322 cqe->cqe_info.err_info.rx_buf_off;
1323 io_req->tx_buf_off =
1324 cqe->cqe_info.err_info.tx_buf_off;
1325 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1326 rval = qedf_send_rec(io_req);
1327 /*
1328 * We only want to abort the io_req if we
1329 * can't queue the REC command as we want to
1330 * keep the exchange open for recovery.
1331 */
1332 if (rval)
1333 goto send_abort;
1334 }
1335 return;
1336 }
1337 }
1338
1339send_abort:
1340 init_completion(&io_req->abts_done);
1341 rval = qedf_initiate_abts(io_req, true);
1342 if (rval)
1343 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1344}
1345
1346/* Cleanup a command when we receive an error detection completion */
1347void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1348 struct qedf_ioreq *io_req)
1349{
1350 int rval;
1351
1352 if (!cqe)
1353 return;
1354
1355 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1356 "xid=0x%x\n", io_req->xid);
1357 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1358 "err_warn_bitmap=%08x:%08x\n",
1359 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1360 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1361 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1362 "rx_buff_off=%08x, rx_id=%04x\n",
1363 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1364 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1365 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1366
1367 if (qedf->stop_io_on_error) {
1368 qedf_stop_all_io(qedf);
1369 return;
1370 }
1371
1372 init_completion(&io_req->abts_done);
1373 rval = qedf_initiate_abts(io_req, true);
1374 if (rval)
1375 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1376}
1377
1378static void qedf_flush_els_req(struct qedf_ctx *qedf,
1379 struct qedf_ioreq *els_req)
1380{
1381 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1382 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1383 kref_read(&els_req->refcount));
1384
1385 /*
1386 * Need to distinguish this from a timeout when calling the
1387 * els_req->cb_func.
1388 */
1389 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1390
1391 /* Cancel the timer */
1392 cancel_delayed_work_sync(&els_req->timeout_work);
1393
1394 /* Call callback function to complete command */
1395 if (els_req->cb_func && els_req->cb_arg) {
1396 els_req->cb_func(els_req->cb_arg);
1397 els_req->cb_arg = NULL;
1398 }
1399
1400 /* Release kref for original initiate_els */
1401 kref_put(&els_req->refcount, qedf_release_cmd);
1402}
1403
1404/* A value of -1 for lun is a wild card that means flush all
1405 * active SCSI I/Os for the target.
1406 */
1407void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1408{
1409 struct qedf_ioreq *io_req;
1410 struct qedf_ctx *qedf;
1411 struct qedf_cmd_mgr *cmd_mgr;
1412 int i, rc;
1413
1414 if (!fcport)
1415 return;
1416
1417 qedf = fcport->qedf;
1418 cmd_mgr = qedf->cmd_mgr;
1419
1420 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
1421
1422 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1423 io_req = &cmd_mgr->cmds[i];
1424
1425 if (!io_req)
1426 continue;
1427 if (io_req->fcport != fcport)
1428 continue;
1429 if (io_req->cmd_type == QEDF_ELS) {
1430 rc = kref_get_unless_zero(&io_req->refcount);
1431 if (!rc) {
1432 QEDF_ERR(&(qedf->dbg_ctx),
1433 "Could not get kref for io_req=0x%p.\n",
1434 io_req);
1435 continue;
1436 }
1437 qedf_flush_els_req(qedf, io_req);
1438 /*
1439 * Release the kref and go back to the top of the
1440 * loop.
1441 */
1442 goto free_cmd;
1443 }
1444
1445 if (!io_req->sc_cmd)
1446 continue;
1447 if (lun > 0) {
1448 if (io_req->sc_cmd->device->lun !=
1449 (u64)lun)
1450 continue;
1451 }
1452
1453 /*
1454 * Use kref_get_unless_zero in the unlikely case the command
1455 * we're about to flush was completed in the normal SCSI path
1456 */
1457 rc = kref_get_unless_zero(&io_req->refcount);
1458 if (!rc) {
1459 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1460 "io_req=0x%p\n", io_req);
1461 continue;
1462 }
1463 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1464 "Cleanup xid=0x%x.\n", io_req->xid);
1465
1466 /* Cleanup task and return I/O mid-layer */
1467 qedf_initiate_cleanup(io_req, true);
1468
1469free_cmd:
1470 kref_put(&io_req->refcount, qedf_release_cmd);
1471 }
1472}
1473
1474/*
1475 * Initiate a ABTS middle path command. Note that we don't have to initialize
1476 * the task context for an ABTS task.
1477 */
1478int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1479{
1480 struct fc_lport *lport;
1481 struct qedf_rport *fcport = io_req->fcport;
1482 struct fc_rport_priv *rdata;
1483 struct qedf_ctx *qedf;
1484 u16 xid;
1485 u32 r_a_tov = 0;
1486 int rc = 0;
1487 unsigned long flags;
1488 struct fcoe_wqe *sqe;
1489 u16 sqe_idx;
1490
1491 /* Sanity check qedf_rport before dereferencing any pointers */
1492 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1493 QEDF_ERR(NULL, "tgt not offloaded\n");
1494 rc = 1;
1495 goto abts_err;
1496 }
1497
1498 rdata = fcport->rdata;
1499 r_a_tov = rdata->r_a_tov;
1500 qedf = fcport->qedf;
1501 lport = qedf->lport;
1502
1503 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1504 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1505 rc = 1;
1506 goto abts_err;
1507 }
1508
1509 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1510 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1511 rc = 1;
1512 goto abts_err;
1513 }
1514
1515 /* Ensure room on SQ */
1516 if (!atomic_read(&fcport->free_sqes)) {
1517 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1518 rc = 1;
1519 goto abts_err;
1520 }
1521
1522
1523 kref_get(&io_req->refcount);
1524
1525 xid = io_req->xid;
1526 qedf->control_requests++;
1527 qedf->packet_aborts++;
1528
1529 /* Set the return CPU to be the same as the request one */
1530 io_req->cpu = smp_processor_id();
1531
1532 /* Set the command type to abort */
1533 io_req->cmd_type = QEDF_ABTS;
1534 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1535
1536 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1537 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1538 "0x%x\n", xid);
1539
1540 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1541
1542 spin_lock_irqsave(&fcport->rport_lock, flags);
1543
1544 sqe_idx = qedf_get_sqe_idx(fcport);
1545 sqe = &fcport->sq[sqe_idx];
1546 memset(sqe, 0, sizeof(struct fcoe_wqe));
1547 io_req->task_params->sqe = sqe;
1548
1549 init_initiator_abort_fcoe_task(io_req->task_params);
1550 qedf_ring_doorbell(fcport);
1551
1552 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1553
1554 return rc;
1555abts_err:
1556 /*
1557 * If the ABTS task fails to queue then we need to cleanup the
1558 * task at the firmware.
1559 */
1560 qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
1561 return rc;
1562}
1563
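/*
 * Handle the ABTS response CQE: a BA_ACC schedules an RRQ after R_A_TOV
 * while a BA_RJT simply marks the abort as failed; in both cases any
 * eh_abort waiter is woken via abts_done.
 */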
1564void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1565 struct qedf_ioreq *io_req)
1566{
1567 uint32_t r_ctl;
1568 uint16_t xid;
1569
1570 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1571 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1572
1573 cancel_delayed_work(&io_req->timeout_work);
1574
1575 xid = io_req->xid;
1576 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1577
1578 switch (r_ctl) {
1579 case FC_RCTL_BA_ACC:
1580 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1581 "ABTS response - ACC Send RRQ after R_A_TOV\n");
1582 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1583 /*
1584 * Don't release this cmd yet. It will be released
1585 * after we get the RRQ response.
1586 */
1587 kref_get(&io_req->refcount);
1588 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1589 msecs_to_jiffies(qedf->lport->r_a_tov));
1590 break;
1591 /* For error cases let the cleanup return the command */
1592 case FC_RCTL_BA_RJT:
1593 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1594 "ABTS response - RJT\n");
1595 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1596 break;
1597 default:
1598 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1599 break;
1600 }
1601
1602 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1603
1604 if (io_req->sc_cmd) {
1605 if (io_req->return_scsi_cmd_on_abts)
1606 qedf_scsi_done(qedf, io_req, DID_ERROR);
1607 }
1608
1609 /* Notify eh_abort handler that ABTS is complete */
1610 complete(&io_req->abts_done);
1611
1612 kref_put(&io_req->refcount, qedf_release_cmd);
1613}
1614
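/*
 * Allocate the request/response buffers and the single-entry BD tables
 * used by middle path (ELS and task management) commands.
 */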
1615int qedf_init_mp_req(struct qedf_ioreq *io_req)
1616{
1617 struct qedf_mp_req *mp_req;
1618 struct scsi_sge *mp_req_bd;
1619 struct scsi_sge *mp_resp_bd;
1620 struct qedf_ctx *qedf = io_req->fcport->qedf;
1621 dma_addr_t addr;
1622 uint64_t sz;
1623
1624 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1625
1626 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1627 memset(mp_req, 0, sizeof(struct qedf_mp_req));
1628
1629 if (io_req->cmd_type != QEDF_ELS) {
1630 mp_req->req_len = sizeof(struct fcp_cmnd);
1631 io_req->data_xfer_len = mp_req->req_len;
1632 } else
1633 mp_req->req_len = io_req->data_xfer_len;
1634
1635 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1636 &mp_req->req_buf_dma, GFP_KERNEL);
1637 if (!mp_req->req_buf) {
1638 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1639 qedf_free_mp_resc(io_req);
1640 return -ENOMEM;
1641 }
1642
1643 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1644 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
1645 if (!mp_req->resp_buf) {
1646 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
1647 "buffer\n");
1648 qedf_free_mp_resc(io_req);
1649 return -ENOMEM;
1650 }
1651
1652 /* Allocate and map mp_req_bd and mp_resp_bd */
1653 sz = sizeof(struct scsi_sge);
1654 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1655 &mp_req->mp_req_bd_dma, GFP_KERNEL);
1656 if (!mp_req->mp_req_bd) {
1657 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
1658 qedf_free_mp_resc(io_req);
1659 return -ENOMEM;
1660 }
1661
1662 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1663 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
1664 if (!mp_req->mp_resp_bd) {
1665 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
1666 qedf_free_mp_resc(io_req);
1667 return -ENOMEM;
1668 }
1669
1670 /* Fill bd table */
1671 addr = mp_req->req_buf_dma;
1672 mp_req_bd = mp_req->mp_req_bd;
1673 mp_req_bd->sge_addr.lo = U64_LO(addr);
1674 mp_req_bd->sge_addr.hi = U64_HI(addr);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001675 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001676
1677 /*
1678 * MP buffer is either a task mgmt command or an ELS.
1679 * So the assumption is that it consumes a single bd
1680 * entry in the bd table
1681 */
1682 mp_resp_bd = mp_req->mp_resp_bd;
1683 addr = mp_req->resp_buf_dma;
1684 mp_resp_bd->sge_addr.lo = U64_LO(addr);
1685 mp_resp_bd->sge_addr.hi = U64_HI(addr);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001686 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001687
1688 return 0;
1689}
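
/*
 * Illustrative sketch (not part of the driver): the single-SGE fill
 * pattern used twice above, pulled out as a hypothetical helper.  A
 * middle-path (task management or ELS) request is assumed to fit in one
 * page, so each BD table holds exactly one scsi_sge describing one
 * buffer.  With such a helper the two blocks above would reduce to calls
 * like qedf_example_fill_sge(mp_req->mp_req_bd, mp_req->req_buf_dma,
 * QEDF_PAGE_SIZE).
 */
#if 0
static void qedf_example_fill_sge(struct scsi_sge *sge, dma_addr_t addr,
	u32 len)
{
	/* The firmware consumes the DMA address as two 32-bit halves. */
	sge->sge_addr.lo = U64_LO(addr);
	sge->sge_addr.hi = U64_HI(addr);
	sge->sge_len = len;
}
#endif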
1690
1691/*
1692 * Last-ditch effort to clear the port if it's stuck. Used only after a
1693 * cleanup task times out.
1694 */
1695static void qedf_drain_request(struct qedf_ctx *qedf)
1696{
1697 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1698 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
1699 return;
1700 }
1701
1702 /* Set bit to return all queuecommand requests as busy */
1703 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1704
1705 /* Call qed drain request for function. Should be synchronous */
1706 qed_ops->common->drain(qedf->cdev);
1707
1708 /* Settle time for CQEs to be returned */
1709 msleep(100);
1710
1711 /* Unplug and continue */
1712 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1713}
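
/*
 * Illustrative sketch (not part of the driver): how the QEDF_DRAIN_ACTIVE
 * flag set above is meant to be consumed.  While the MCP drain is in
 * flight, the queuecommand path should push new I/O back to the SCSI
 * midlayer so nothing else is posted to the stalled function.  The helper
 * name is hypothetical; the real check lives in the driver's queuecommand
 * handler.
 */
#if 0
static int qedf_example_drain_check(struct qedf_ctx *qedf)
{
	/* Ask the midlayer to retry later while the drain runs. */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags))
		return SCSI_MLQUEUE_HOST_BUSY;

	return 0;
}
#endif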
1714
1715/*
1716 * Returns SUCCESS if the cleanup task does not time out, otherwise returns
1717 * FAILED.
1718 */
1719int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1720 bool return_scsi_cmd_on_abts)
1721{
1722 struct qedf_rport *fcport;
1723 struct qedf_ctx *qedf;
1724 uint16_t xid;
1725 struct fcoe_task_context *task;
1726 int tmo = 0;
1727 int rc = SUCCESS;
1728 unsigned long flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001729 struct fcoe_wqe *sqe;
1730 u16 sqe_idx;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001731
1732 fcport = io_req->fcport;
1733 if (!fcport) {
1734 QEDF_ERR(NULL, "fcport is NULL.\n");
1735 return SUCCESS;
1736 }
1737
Chad Dupuisff34e8e2017-05-31 06:33:52 -07001738 /* Sanity check qedf_rport before dereferencing any pointers */
1739 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1740 QEDF_ERR(NULL, "tgt not offloaded\n");
1741 rc = 1;
1742 return SUCCESS;
1743 }
1744
Dupuis, Chad61d86582017-02-15 06:28:23 -08001745 qedf = fcport->qedf;
1746 if (!qedf) {
1747 QEDF_ERR(NULL, "qedf is NULL.\n");
1748 return SUCCESS;
1749 }
1750
1751 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1752 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
1753 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1754 "cleanup processing or already completed.\n",
1755 io_req->xid);
1756 return SUCCESS;
1757 }
1758
1759 /* Ensure room on SQ */
1760 if (!atomic_read(&fcport->free_sqes)) {
1761 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1762 return FAILED;
1763 }
1764
1765
1766 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1767 io_req->xid);
1768
1769 /* Cleanup cmds re-use the same TID as the original I/O */
1770 xid = io_req->xid;
1771 io_req->cmd_type = QEDF_CLEANUP;
1772 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1773
1774 /* Set the return CPU to be the same as the request one */
1775 io_req->cpu = smp_processor_id();
1776
1777 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1778
1779 task = qedf_get_task_mem(&qedf->tasks, xid);
1780
1781 init_completion(&io_req->tm_done);
1782
Dupuis, Chad61d86582017-02-15 06:28:23 -08001783 spin_lock_irqsave(&fcport->rport_lock, flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001784
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001785 sqe_idx = qedf_get_sqe_idx(fcport);
1786 sqe = &fcport->sq[sqe_idx];
1787 memset(sqe, 0, sizeof(struct fcoe_wqe));
1788 io_req->task_params->sqe = sqe;
1789
1790 init_initiator_cleanup_fcoe_task(io_req->task_params);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001791 qedf_ring_doorbell(fcport);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001792
Dupuis, Chad61d86582017-02-15 06:28:23 -08001793 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1794
1795 tmo = wait_for_completion_timeout(&io_req->tm_done,
1796 QEDF_CLEANUP_TIMEOUT * HZ);
1797
1798 if (!tmo) {
1799 rc = FAILED;
1800 /* Timeout case */
1801 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
1802 "xid=%x.\n", io_req->xid);
1803 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1804 /* Issue a drain request if cleanup task times out */
1805 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
1806 qedf_drain_request(qedf);
1807 }
1808
1809 if (io_req->sc_cmd) {
1810 if (io_req->return_scsi_cmd_on_abts)
1811 qedf_scsi_done(qedf, io_req, DID_ERROR);
1812 }
1813
1814 if (rc == SUCCESS)
1815 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
1816 else
1817 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
1818
1819 return rc;
1820}
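
/*
 * Illustrative sketch (not part of the driver): the SQ posting pattern
 * that the ABTS, cleanup and TMF paths in this file all repeat - take the
 * rport_lock, claim the next SQE, zero it, let the caller fill in the
 * task-specific fields, then ring the doorbell.  The helper and its
 * callback argument are hypothetical.
 */
#if 0
static void qedf_example_post_sqe(struct qedf_rport *fcport,
	void (*fill)(struct fcoe_wqe *sqe, void *arg), void *arg)
{
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Task-specific setup (ABTS, cleanup or TMF). */
	fill(sqe, arg);

	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
#endif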
1821
1822void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1823 struct qedf_ioreq *io_req)
1824{
1825 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
1826 io_req->xid);
1827
1828 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1829
1830 /* Complete so we can finish cleaning up the I/O */
1831 complete(&io_req->tm_done);
1832}
1833
1834static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1835 uint8_t tm_flags)
1836{
1837 struct qedf_ioreq *io_req;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001838 struct fcoe_task_context *task;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001839 struct qedf_ctx *qedf = fcport->qedf;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001840 struct fc_lport *lport = qedf->lport;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001841 int rc = 0;
1842 uint16_t xid;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001843 int tmo = 0;
1844 unsigned long flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001845 struct fcoe_wqe *sqe;
1846 u16 sqe_idx;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001847
1848 if (!sc_cmd) {
1849 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
1850 return FAILED;
1851 }
1852
Chad Dupuis57a35482017-05-31 06:33:58 -07001853 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
Dupuis, Chad61d86582017-02-15 06:28:23 -08001854 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
1855 rc = FAILED;
1856 return FAILED;
1857 }
1858
1859 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
1860 "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
1861
1862 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
1863 if (!io_req) {
1864 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate TMF io_req\n");
1865 rc = -EAGAIN;
1866 goto reset_tmf_err;
1867 }
1868
1869 /* Initialize rest of io_req fields */
1870 io_req->sc_cmd = sc_cmd;
1871 io_req->fcport = fcport;
1872 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
1873
1874 /* Set the return CPU to be the same as the request one */
1875 io_req->cpu = smp_processor_id();
1876
Dupuis, Chad61d86582017-02-15 06:28:23 -08001877 /* Set TM flags */
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001878 io_req->io_req_flags = QEDF_READ;
1879 io_req->data_xfer_len = 0;
1880 io_req->tm_flags = tm_flags;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001881
1882 /* Default is to return a SCSI command when an error occurs */
1883 io_req->return_scsi_cmd_on_abts = true;
1884
Dupuis, Chad61d86582017-02-15 06:28:23 -08001885 /* Obtain exchange id */
1886 xid = io_req->xid;
1887
1888 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
1889 "0x%x\n", xid);
1890
1891 /* Initialize task context for this IO request */
1892 task = qedf_get_task_mem(&qedf->tasks, xid);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001893
1894 init_completion(&io_req->tm_done);
1895
Dupuis, Chad61d86582017-02-15 06:28:23 -08001896 spin_lock_irqsave(&fcport->rport_lock, flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001897
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001898 sqe_idx = qedf_get_sqe_idx(fcport);
1899 sqe = &fcport->sq[sqe_idx];
1900 memset(sqe, 0, sizeof(struct fcoe_wqe));
1901
1902 qedf_init_task(fcport, lport, io_req, task, sqe);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001903 qedf_ring_doorbell(fcport);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001904
Dupuis, Chad61d86582017-02-15 06:28:23 -08001905 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1906
1907 tmo = wait_for_completion_timeout(&io_req->tm_done,
1908 QEDF_TM_TIMEOUT * HZ);
1909
1910 if (!tmo) {
1911 rc = FAILED;
1912 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
1913 } else {
1914 /* Check TMF response code */
1915 if (io_req->fcp_rsp_code == 0)
1916 rc = SUCCESS;
1917 else
1918 rc = FAILED;
1919 }
1920
1921 if (tm_flags == FCP_TMF_LUN_RESET)
1922 qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
1923 else
1924 qedf_flush_active_ios(fcport, -1);
1925
1926 kref_put(&io_req->refcount, qedf_release_cmd);
1927
1928 if (rc != SUCCESS) {
1929 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
1930 rc = FAILED;
1931 } else {
1932 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
1933 rc = SUCCESS;
1934 }
1935reset_tmf_err:
1936 return rc;
1937}
1938
1939int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
1940{
1941 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1942 struct fc_rport_libfc_priv *rp = rport->dd_data;
1943 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
1944 struct qedf_ctx *qedf;
1945 struct fc_lport *lport;
1946 int rc = SUCCESS;
1947 int rval;
1948
1949 rval = fc_remote_port_chkready(rport);
1950
1951 if (rval) {
1952 QEDF_ERR(NULL, "device_reset rport not ready\n");
1953 rc = FAILED;
1954 goto tmf_err;
1955 }
1956
1957 if (fcport == NULL) {
1958 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
1959 rc = FAILED;
1960 goto tmf_err;
1961 }
1962
1963 qedf = fcport->qedf;
1964 lport = qedf->lport;
1965
1966 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
1967 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
1968 rc = SUCCESS;
1969 goto tmf_err;
1970 }
1971
1972 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1973 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1974 rc = FAILED;
1975 goto tmf_err;
1976 }
1977
1978 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
1979
1980tmf_err:
1981 return rc;
1982}
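
/*
 * Illustrative sketch (not part of the driver): how a SCSI error handler
 * callback might drive qedf_initiate_tmf() above for a LUN reset.  The
 * wrapper name is hypothetical; the real eh_device_reset handler is
 * defined elsewhere in the driver.
 */
#if 0
static int qedf_example_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	/* On success only the affected LUN's I/Os are flushed. */
	return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
#endif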
1983
1984void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1985 struct qedf_ioreq *io_req)
1986{
1987 struct fcoe_cqe_rsp_info *fcp_rsp;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001988
1989 fcp_rsp = &cqe->cqe_info.rsp_info;
1990 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1991
1992 io_req->sc_cmd = NULL;
1993 complete(&io_req->tm_done);
1994}
1995
1996void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
1997 struct fcoe_cqe *cqe)
1998{
1999 unsigned long flags;
2000 uint16_t tmp;
2001 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2002 u32 payload_len, crc;
2003 struct fc_frame_header *fh;
2004 struct fc_frame *fp;
2005 struct qedf_io_work *io_work;
2006 u32 bdq_idx;
2007 void *bdq_addr;
2008
2009 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2010 "address.hi=%x address.lo=%x opaque_data.hi=%x "
2011 "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
2012 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
2013 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
2014 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
2015 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
2016 qedf->bdq_prod_idx, pktlen);
2017
2018 bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
2019 if (bdq_idx >= QEDF_BDQ_SIZE) {
2020 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2021 bdq_idx);
2022 goto increment_prod;
2023 }
2024
2025 bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2026 if (!bdq_addr) {
2027 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2028 "unsolicited packet.\n");
2029 goto increment_prod;
2030 }
2031
2032 if (qedf_dump_frames) {
2033 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2034 "BDQ frame is at addr=%p.\n", bdq_addr);
2035 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2036 (void *)bdq_addr, pktlen, false);
2037 }
2038
2039 /* Allocate frame */
2040 payload_len = pktlen - sizeof(struct fc_frame_header);
2041 fp = fc_frame_alloc(qedf->lport, payload_len);
2042 if (!fp) {
2043 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2044 goto increment_prod;
2045 }
2046
2047 /* Copy data from BDQ buffer into fc_frame struct */
2048 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2049 memcpy(fh, (void *)bdq_addr, pktlen);
2050
2051 /* Initialize the frame so libfc sees it as a valid frame */
2052 crc = fcoe_fc_crc(fp);
2053 fc_frame_init(fp);
2054 fr_dev(fp) = qedf->lport;
2055 fr_sof(fp) = FC_SOF_I3;
2056 fr_eof(fp) = FC_EOF_T;
2057 fr_crc(fp) = cpu_to_le32(~crc);
2058
2059 /*
2060 * We need to pass the frame back up to libfc in a non-atomic
2061 * context.
2062 */
2063 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2064 if (!io_work) {
2065 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2066 "work for I/O completion.\n");
2067 fc_frame_free(fp);
2068 goto increment_prod;
2069 }
2070 memset(io_work, 0, sizeof(struct qedf_io_work));
2071
2072 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2073
2074 /* Copy contents of CQE for deferred processing */
2075 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2076
2077 io_work->qedf = qedf;
2078 io_work->fp = fp;
2079
2080 queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2081increment_prod:
2082 spin_lock_irqsave(&qedf->hba_lock, flags);
2083
2084 /* Increment producer to let f/w know we've handled the frame */
2085 qedf->bdq_prod_idx++;
2086
2087 /* Producer index wraps at uint16_t boundary */
2088 if (qedf->bdq_prod_idx == 0xffff)
2089 qedf->bdq_prod_idx = 0;
2090
2091 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2092 tmp = readw(qedf->bdq_primary_prod);
2093 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2094 tmp = readw(qedf->bdq_secondary_prod);
2095
2096 spin_unlock_irqrestore(&qedf->hba_lock, flags);
2097}
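
/*
 * Illustrative sketch (not part of the driver): a simplified shape for
 * the qedf_fp_io_handler() worker queued above.  Out of atomic context it
 * hands the reconstructed frame to libfc and returns the work item to the
 * mempool it came from.  The real handler also covers CQEs that carry no
 * frame; that path is omitted here.
 */
#if 0
static void qedf_example_fp_io_handler(struct work_struct *work)
{
	struct qedf_io_work *io_work =
	    container_of(work, struct qedf_io_work, work);
	struct qedf_ctx *qedf = io_work->qedf;

	/* Safe to sleep here, unlike in the CQE fast path above. */
	if (io_work->fp)
		fc_exch_recv(qedf->lport, io_work->fp);

	mempool_free(io_work, qedf->io_mempool);
}
#endif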