blob: cf19f4976f5fb6338509d85a6e9c4624b9b882b5 [file] [log] [blame]
James Smart43140ca2017-03-04 09:30:34 -08001
dea31012005-04-17 16:05:31 -05002/*******************************************************************
3 * This file is part of the Emulex Linux Device Driver for *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04004 * Fibre Channel Host Bus Adapters. *
James Smartd080abe2017-02-12 13:52:39 -08005 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
6 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
James Smart50611572016-03-31 14:12:34 -07007 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04008 * EMULEX and SLI are trademarks of Emulex. *
James Smartd080abe2017-02-12 13:52:39 -08009 * www.broadcom.com *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -040010 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea31012005-04-17 16:05:31 -050011 * *
12 * This program is free software; you can redistribute it and/or *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -040013 * modify it under the terms of version 2 of the GNU General *
14 * Public License as published by the Free Software Foundation. *
15 * This program is distributed in the hope that it will be useful. *
16 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
17 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
18 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
19 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
20 * TO BE LEGALLY INVALID. See the GNU General Public License for *
21 * more details, a copy of which can be found in the file COPYING *
22 * included with this package. *
dea31012005-04-17 16:05:31 -050023 *******************************************************************/
24
dea31012005-04-17 16:05:31 -050025#include <linux/blkdev.h>
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090029#include <linux/slab.h>
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +010030#include <linux/lockdep.h>
dea31012005-04-17 16:05:31 -050031
James.Smart@Emulex.Com91886522005-08-10 15:03:09 -040032#include <scsi/scsi.h>
dea31012005-04-17 16:05:31 -050033#include <scsi/scsi_cmnd.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_host.h>
James.Smart@Emulex.Comf888ba32005-08-10 15:03:01 -040036#include <scsi/scsi_transport_fc.h>
James Smartda0436e2009-05-22 14:51:39 -040037#include <scsi/fc/fc_fs.h>
James Smart0d878412009-10-02 15:16:56 -040038#include <linux/aer.h>
dea31012005-04-17 16:05:31 -050039
James Smart895427b2017-02-12 13:52:30 -080040#include <linux/nvme-fc-driver.h>
41
James Smartda0436e2009-05-22 14:51:39 -040042#include "lpfc_hw4.h"
dea31012005-04-17 16:05:31 -050043#include "lpfc_hw.h"
44#include "lpfc_sli.h"
James Smartda0436e2009-05-22 14:51:39 -040045#include "lpfc_sli4.h"
James Smartea2151b2008-09-07 11:52:10 -040046#include "lpfc_nl.h"
dea31012005-04-17 16:05:31 -050047#include "lpfc_disc.h"
dea31012005-04-17 16:05:31 -050048#include "lpfc.h"
James Smart895427b2017-02-12 13:52:30 -080049#include "lpfc_scsi.h"
50#include "lpfc_nvme.h"
James Smartf358dd02017-02-12 13:52:34 -080051#include "lpfc_nvmet.h"
dea31012005-04-17 16:05:31 -050052#include "lpfc_crtn.h"
53#include "lpfc_logmsg.h"
54#include "lpfc_compat.h"
James Smart858c9f62007-06-17 19:56:39 -050055#include "lpfc_debugfs.h"
James Smart04c68492009-05-22 14:52:52 -040056#include "lpfc_vport.h"
James Smart61bda8f2016-10-13 15:06:05 -070057#include "lpfc_version.h"
dea31012005-04-17 16:05:31 -050058
59/* There are only four IOCB completion types. */
60typedef enum _lpfc_iocb_type {
61 LPFC_UNKNOWN_IOCB,
62 LPFC_UNSOL_IOCB,
63 LPFC_SOL_IOCB,
64 LPFC_ABORT_IOCB
65} lpfc_iocb_type;
66
James Smart4f774512009-05-22 14:52:35 -040067
68/* Provide function prototypes local to this module. */
69static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
70 uint32_t);
71static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
James Smart45ed1192009-10-02 15:17:02 -040072 uint8_t *, uint32_t *);
73static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 struct lpfc_iocbq *);
James Smart6669f9b2009-10-02 15:16:45 -040075static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 struct hbq_dmabuf *);
James Smart895427b2017-02-12 13:52:30 -080077static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
James Smart05580562011-05-24 11:40:48 -040078 struct lpfc_cqe *);
James Smart895427b2017-02-12 13:52:30 -080079static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
James Smart8a9d2e82012-05-09 21:16:12 -040080 int);
James Smartba20c852012-08-03 12:36:52 -040081static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
82 uint32_t);
James Smarte8d3c3b2013-10-10 12:21:30 -040083static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
84static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
James Smart895427b2017-02-12 13:52:30 -080085static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
86 struct lpfc_sli_ring *pring,
87 struct lpfc_iocbq *cmdiocb);
James Smart05580562011-05-24 11:40:48 -040088
James Smart4f774512009-05-22 14:52:35 -040089static IOCB_t *
90lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
91{
92 return &iocbq->iocb;
93}
94
95/**
96 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
97 * @q: The Work Queue to operate on.
98 * @wqe: The work Queue Entry to put on the Work queue.
99 *
100 * This routine will copy the contents of @wqe to the next available entry on
101 * the @q. This function will then ring the Work Queue Doorbell to signal the
102 * HBA to start processing the Work Queue Entry. This function returns 0 if
103 * successful. If no entries are available on @q then this function will return
104 * -ENOMEM.
105 * The caller is expected to hold the hbalock when calling this routine.
106 **/
107static uint32_t
108lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
109{
James Smart2e90f4b2011-12-13 13:22:37 -0500110 union lpfc_wqe *temp_wqe;
James Smart4f774512009-05-22 14:52:35 -0400111 struct lpfc_register doorbell;
112 uint32_t host_index;
James Smart027140e2012-08-03 12:35:44 -0400113 uint32_t idx;
James Smart4f774512009-05-22 14:52:35 -0400114
James Smart2e90f4b2011-12-13 13:22:37 -0500115 /* sanity check on queue memory */
116 if (unlikely(!q))
117 return -ENOMEM;
118 temp_wqe = q->qe[q->host_index].wqe;
119
James Smart4f774512009-05-22 14:52:35 -0400120 /* If the host has not yet processed the next entry then we are done */
James Smart027140e2012-08-03 12:35:44 -0400121 idx = ((q->host_index + 1) % q->entry_count);
122 if (idx == q->hba_index) {
James Smartb84daac2012-08-03 12:35:13 -0400123 q->WQ_overflow++;
James Smart4f774512009-05-22 14:52:35 -0400124 return -ENOMEM;
James Smartb84daac2012-08-03 12:35:13 -0400125 }
126 q->WQ_posted++;
James Smart4f774512009-05-22 14:52:35 -0400127 /* set consumption flag every once in a while */
James Smartff78d8f2011-12-13 13:21:35 -0500128 if (!((q->host_index + 1) % q->entry_repost))
James Smartf0d9bcc2010-10-22 11:07:09 -0400129 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
James Smartfedd3b72011-02-16 12:39:24 -0500130 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
131 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
James Smart4f774512009-05-22 14:52:35 -0400132 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
James Smart6b3b3bd2016-12-19 15:07:30 -0800133 /* ensure WQE bcopy flushed before doorbell write */
134 wmb();
James Smart4f774512009-05-22 14:52:35 -0400135
136 /* Update the host index before invoking device */
137 host_index = q->host_index;
James Smart027140e2012-08-03 12:35:44 -0400138
139 q->host_index = idx;
James Smart4f774512009-05-22 14:52:35 -0400140
141 /* Ring Doorbell */
142 doorbell.word0 = 0;
James Smart962bc512013-01-03 15:44:00 -0500143 if (q->db_format == LPFC_DB_LIST_FORMAT) {
144 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
145 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
146 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
147 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
148 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
149 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
150 } else {
151 return -EINVAL;
152 }
153 writel(doorbell.word0, q->db_regaddr);
James Smart4f774512009-05-22 14:52:35 -0400154
155 return 0;
156}
157
158/**
159 * lpfc_sli4_wq_release - Updates internal hba index for WQ
160 * @q: The Work Queue to operate on.
161 * @index: The index to advance the hba index to.
162 *
163 * This routine will update the HBA index of a queue to reflect consumption of
164 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
165 * an entry the host calls this function to update the queue's internal
166 * pointers. This routine returns the number of entries that were consumed by
167 * the HBA.
168 **/
169static uint32_t
170lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
171{
172 uint32_t released = 0;
173
James Smart2e90f4b2011-12-13 13:22:37 -0500174 /* sanity check on queue memory */
175 if (unlikely(!q))
176 return 0;
177
James Smart4f774512009-05-22 14:52:35 -0400178 if (q->hba_index == index)
179 return 0;
180 do {
181 q->hba_index = ((q->hba_index + 1) % q->entry_count);
182 released++;
183 } while (q->hba_index != index);
184 return released;
185}
186
187/**
188 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
189 * @q: The Mailbox Queue to operate on.
190 * @wqe: The Mailbox Queue Entry to put on the Work queue.
191 *
192 * This routine will copy the contents of @mqe to the next available entry on
193 * the @q. This function will then ring the Work Queue Doorbell to signal the
194 * HBA to start processing the Work Queue Entry. This function returns 0 if
195 * successful. If no entries are available on @q then this function will return
196 * -ENOMEM.
197 * The caller is expected to hold the hbalock when calling this routine.
198 **/
199static uint32_t
200lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
201{
James Smart2e90f4b2011-12-13 13:22:37 -0500202 struct lpfc_mqe *temp_mqe;
James Smart4f774512009-05-22 14:52:35 -0400203 struct lpfc_register doorbell;
James Smart4f774512009-05-22 14:52:35 -0400204
James Smart2e90f4b2011-12-13 13:22:37 -0500205 /* sanity check on queue memory */
206 if (unlikely(!q))
207 return -ENOMEM;
208 temp_mqe = q->qe[q->host_index].mqe;
209
James Smart4f774512009-05-22 14:52:35 -0400210 /* If the host has not yet processed the next entry then we are done */
211 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
212 return -ENOMEM;
213 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
214 /* Save off the mailbox pointer for completion */
215 q->phba->mbox = (MAILBOX_t *)temp_mqe;
216
217 /* Update the host index before invoking device */
James Smart4f774512009-05-22 14:52:35 -0400218 q->host_index = ((q->host_index + 1) % q->entry_count);
219
220 /* Ring Doorbell */
221 doorbell.word0 = 0;
222 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
223 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
224 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
James Smart4f774512009-05-22 14:52:35 -0400225 return 0;
226}
227
228/**
229 * lpfc_sli4_mq_release - Updates internal hba index for MQ
230 * @q: The Mailbox Queue to operate on.
231 *
232 * This routine will update the HBA index of a queue to reflect consumption of
233 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
234 * an entry the host calls this function to update the queue's internal
235 * pointers. This routine returns the number of entries that were consumed by
236 * the HBA.
237 **/
238static uint32_t
239lpfc_sli4_mq_release(struct lpfc_queue *q)
240{
James Smart2e90f4b2011-12-13 13:22:37 -0500241 /* sanity check on queue memory */
242 if (unlikely(!q))
243 return 0;
244
James Smart4f774512009-05-22 14:52:35 -0400245 /* Clear the mailbox pointer for completion */
246 q->phba->mbox = NULL;
247 q->hba_index = ((q->hba_index + 1) % q->entry_count);
248 return 1;
249}
250
251/**
252 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
253 * @q: The Event Queue to get the first valid EQE from
254 *
255 * This routine will get the first valid Event Queue Entry from @q, update
256 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
257 * the Queue (no more work to do), or the Queue is full of EQEs that have been
258 * processed, but not popped back to the HBA then this routine will return NULL.
259 **/
260static struct lpfc_eqe *
261lpfc_sli4_eq_get(struct lpfc_queue *q)
262{
James Smart2e90f4b2011-12-13 13:22:37 -0500263 struct lpfc_eqe *eqe;
James Smart027140e2012-08-03 12:35:44 -0400264 uint32_t idx;
James Smart2e90f4b2011-12-13 13:22:37 -0500265
266 /* sanity check on queue memory */
267 if (unlikely(!q))
268 return NULL;
269 eqe = q->qe[q->hba_index].eqe;
James Smart4f774512009-05-22 14:52:35 -0400270
271 /* If the next EQE is not valid then we are done */
James Smartcb5172e2010-03-15 11:25:07 -0400272 if (!bf_get_le32(lpfc_eqe_valid, eqe))
James Smart4f774512009-05-22 14:52:35 -0400273 return NULL;
274 /* If the host has not yet processed the next entry then we are done */
James Smart027140e2012-08-03 12:35:44 -0400275 idx = ((q->hba_index + 1) % q->entry_count);
276 if (idx == q->host_index)
James Smart4f774512009-05-22 14:52:35 -0400277 return NULL;
278
James Smart027140e2012-08-03 12:35:44 -0400279 q->hba_index = idx;
James Smart27f344e2014-05-07 17:16:46 -0400280
281 /*
282 * insert barrier for instruction interlock : data from the hardware
283 * must have the valid bit checked before it can be copied and acted
James Smart2ea259e2017-02-12 13:52:27 -0800284 * upon. Speculative instructions were allowing a bcopy at the start
285 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
286 * after our return, to copy data before the valid bit check above
287 * was done. As such, some of the copied data was stale. The barrier
288 * ensures the check is before any data is copied.
James Smart27f344e2014-05-07 17:16:46 -0400289 */
290 mb();
James Smart4f774512009-05-22 14:52:35 -0400291 return eqe;
292}
293
294/**
James Smartba20c852012-08-03 12:36:52 -0400295 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
296 * @q: The Event Queue to disable interrupts
297 *
298 **/
299static inline void
300lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
301{
302 struct lpfc_register doorbell;
303
304 doorbell.word0 = 0;
305 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
306 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
307 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
308 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
309 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
310 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
311}
312
313/**
James Smart4f774512009-05-22 14:52:35 -0400314 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
315 * @q: The Event Queue that the host has completed processing for.
316 * @arm: Indicates whether the host wants to arms this CQ.
317 *
318 * This routine will mark all Event Queue Entries on @q, from the last
319 * known completed entry to the last entry that was processed, as completed
320 * by clearing the valid bit for each completion queue entry. Then it will
321 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
322 * The internal host index in the @q will be updated by this routine to indicate
323 * that the host has finished processing the entries. The @arm parameter
324 * indicates that the queue should be rearmed when ringing the doorbell.
325 *
326 * This function will return the number of EQEs that were popped.
327 **/
328uint32_t
329lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
330{
331 uint32_t released = 0;
332 struct lpfc_eqe *temp_eqe;
333 struct lpfc_register doorbell;
334
James Smart2e90f4b2011-12-13 13:22:37 -0500335 /* sanity check on queue memory */
336 if (unlikely(!q))
337 return 0;
338
James Smart4f774512009-05-22 14:52:35 -0400339 /* while there are valid entries */
340 while (q->hba_index != q->host_index) {
341 temp_eqe = q->qe[q->host_index].eqe;
James Smartcb5172e2010-03-15 11:25:07 -0400342 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
James Smart4f774512009-05-22 14:52:35 -0400343 released++;
344 q->host_index = ((q->host_index + 1) % q->entry_count);
345 }
346 if (unlikely(released == 0 && !arm))
347 return 0;
348
349 /* ring doorbell for number popped */
350 doorbell.word0 = 0;
351 if (arm) {
352 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
353 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
354 }
355 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
356 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
James Smart6b5151f2012-01-18 16:24:06 -0500357 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
358 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
359 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
James Smart4f774512009-05-22 14:52:35 -0400360 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
James Smarta747c9c2009-11-18 15:41:10 -0500361 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
362 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
363 readl(q->phba->sli4_hba.EQCQDBregaddr);
James Smart4f774512009-05-22 14:52:35 -0400364 return released;
365}
366
367/**
368 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
369 * @q: The Completion Queue to get the first valid CQE from
370 *
371 * This routine will get the first valid Completion Queue Entry from @q, update
372 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
373 * the Queue (no more work to do), or the Queue is full of CQEs that have been
374 * processed, but not popped back to the HBA then this routine will return NULL.
375 **/
376static struct lpfc_cqe *
377lpfc_sli4_cq_get(struct lpfc_queue *q)
378{
379 struct lpfc_cqe *cqe;
James Smart027140e2012-08-03 12:35:44 -0400380 uint32_t idx;
James Smart4f774512009-05-22 14:52:35 -0400381
James Smart2e90f4b2011-12-13 13:22:37 -0500382 /* sanity check on queue memory */
383 if (unlikely(!q))
384 return NULL;
385
James Smart4f774512009-05-22 14:52:35 -0400386 /* If the next CQE is not valid then we are done */
James Smartcb5172e2010-03-15 11:25:07 -0400387 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
James Smart4f774512009-05-22 14:52:35 -0400388 return NULL;
389 /* If the host has not yet processed the next entry then we are done */
James Smart027140e2012-08-03 12:35:44 -0400390 idx = ((q->hba_index + 1) % q->entry_count);
391 if (idx == q->host_index)
James Smart4f774512009-05-22 14:52:35 -0400392 return NULL;
393
394 cqe = q->qe[q->hba_index].cqe;
James Smart027140e2012-08-03 12:35:44 -0400395 q->hba_index = idx;
James Smart27f344e2014-05-07 17:16:46 -0400396
397 /*
398 * insert barrier for instruction interlock : data from the hardware
399 * must have the valid bit checked before it can be copied and acted
James Smart2ea259e2017-02-12 13:52:27 -0800400 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
401 * instructions allowing action on content before valid bit checked,
402 * add barrier here as well. May not be needed as "content" is a
403 * single 32-bit entity here (vs multi word structure for cq's).
James Smart27f344e2014-05-07 17:16:46 -0400404 */
405 mb();
James Smart4f774512009-05-22 14:52:35 -0400406 return cqe;
407}
408
409/**
410 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
411 * @q: The Completion Queue that the host has completed processing for.
412 * @arm: Indicates whether the host wants to arms this CQ.
413 *
414 * This routine will mark all Completion queue entries on @q, from the last
415 * known completed entry to the last entry that was processed, as completed
416 * by clearing the valid bit for each completion queue entry. Then it will
417 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
418 * The internal host index in the @q will be updated by this routine to indicate
419 * that the host has finished processing the entries. The @arm parameter
420 * indicates that the queue should be rearmed when ringing the doorbell.
421 *
422 * This function will return the number of CQEs that were released.
423 **/
424uint32_t
425lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
426{
427 uint32_t released = 0;
428 struct lpfc_cqe *temp_qe;
429 struct lpfc_register doorbell;
430
James Smart2e90f4b2011-12-13 13:22:37 -0500431 /* sanity check on queue memory */
432 if (unlikely(!q))
433 return 0;
James Smart4f774512009-05-22 14:52:35 -0400434 /* while there are valid entries */
435 while (q->hba_index != q->host_index) {
436 temp_qe = q->qe[q->host_index].cqe;
James Smartcb5172e2010-03-15 11:25:07 -0400437 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
James Smart4f774512009-05-22 14:52:35 -0400438 released++;
439 q->host_index = ((q->host_index + 1) % q->entry_count);
440 }
441 if (unlikely(released == 0 && !arm))
442 return 0;
443
444 /* ring doorbell for number popped */
445 doorbell.word0 = 0;
446 if (arm)
447 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
448 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
449 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
James Smart6b5151f2012-01-18 16:24:06 -0500450 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
451 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
452 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
James Smart4f774512009-05-22 14:52:35 -0400453 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
454 return released;
455}
456
457/**
458 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
459 * @q: The Header Receive Queue to operate on.
460 * @wqe: The Receive Queue Entry to put on the Receive queue.
461 *
462 * This routine will copy the contents of @wqe to the next available entry on
463 * the @q. This function will then ring the Receive Queue Doorbell to signal the
464 * HBA to start processing the Receive Queue Entry. This function returns the
465 * index that the rqe was copied to if successful. If no entries are available
466 * on @q then this function will return -ENOMEM.
467 * The caller is expected to hold the hbalock when calling this routine.
468 **/
James Smart895427b2017-02-12 13:52:30 -0800469int
James Smart4f774512009-05-22 14:52:35 -0400470lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
471 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
472{
James Smart2e90f4b2011-12-13 13:22:37 -0500473 struct lpfc_rqe *temp_hrqe;
474 struct lpfc_rqe *temp_drqe;
James Smart4f774512009-05-22 14:52:35 -0400475 struct lpfc_register doorbell;
Wei Yongjun5a25bf32012-12-02 08:33:24 -0500476 int put_index;
James Smart4f774512009-05-22 14:52:35 -0400477
James Smart2e90f4b2011-12-13 13:22:37 -0500478 /* sanity check on queue memory */
479 if (unlikely(!hq) || unlikely(!dq))
480 return -ENOMEM;
Wei Yongjun5a25bf32012-12-02 08:33:24 -0500481 put_index = hq->host_index;
James Smart2e90f4b2011-12-13 13:22:37 -0500482 temp_hrqe = hq->qe[hq->host_index].rqe;
483 temp_drqe = dq->qe[dq->host_index].rqe;
484
James Smart4f774512009-05-22 14:52:35 -0400485 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
486 return -EINVAL;
487 if (hq->host_index != dq->host_index)
488 return -EINVAL;
489 /* If the host has not yet processed the next entry then we are done */
490 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
491 return -EBUSY;
492 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
493 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
494
495 /* Update the host index to point to the next slot */
496 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
497 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
498
499 /* Ring The Header Receive Queue Doorbell */
James Smart73d91e52011-10-10 21:32:10 -0400500 if (!(hq->host_index % hq->entry_repost)) {
James Smart4f774512009-05-22 14:52:35 -0400501 doorbell.word0 = 0;
James Smart962bc512013-01-03 15:44:00 -0500502 if (hq->db_format == LPFC_DB_RING_FORMAT) {
503 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
504 hq->entry_repost);
505 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
506 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
507 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
508 hq->entry_repost);
509 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
510 hq->host_index);
511 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
512 } else {
513 return -EINVAL;
514 }
515 writel(doorbell.word0, hq->db_regaddr);
James Smart4f774512009-05-22 14:52:35 -0400516 }
517 return put_index;
518}
519
520/**
521 * lpfc_sli4_rq_release - Updates internal hba index for RQ
522 * @q: The Header Receive Queue to operate on.
523 *
524 * This routine will update the HBA index of a queue to reflect consumption of
525 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
526 * consumed an entry the host calls this function to update the queue's
527 * internal pointers. This routine returns the number of entries that were
528 * consumed by the HBA.
529 **/
530static uint32_t
531lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
532{
James Smart2e90f4b2011-12-13 13:22:37 -0500533 /* sanity check on queue memory */
534 if (unlikely(!hq) || unlikely(!dq))
535 return 0;
536
James Smart4f774512009-05-22 14:52:35 -0400537 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
538 return 0;
539 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
540 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
541 return 1;
542}
543
James Smarte59058c2008-08-24 21:49:00 -0400544/**
James Smart3621a712009-04-06 18:47:14 -0400545 * lpfc_cmd_iocb - Get next command iocb entry in the ring
James Smarte59058c2008-08-24 21:49:00 -0400546 * @phba: Pointer to HBA context object.
547 * @pring: Pointer to driver SLI ring object.
548 *
549 * This function returns pointer to next command iocb entry
550 * in the command ring. The caller must hold hbalock to prevent
551 * other threads consume the next command iocb.
552 * SLI-2/SLI-3 provide different sized iocbs.
553 **/
James Smarted957682007-06-17 19:56:37 -0500554static inline IOCB_t *
555lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
556{
James Smart7e56aa22012-08-03 12:35:34 -0400557 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
558 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
James Smarted957682007-06-17 19:56:37 -0500559}
560
James Smarte59058c2008-08-24 21:49:00 -0400561/**
James Smart3621a712009-04-06 18:47:14 -0400562 * lpfc_resp_iocb - Get next response iocb entry in the ring
James Smarte59058c2008-08-24 21:49:00 -0400563 * @phba: Pointer to HBA context object.
564 * @pring: Pointer to driver SLI ring object.
565 *
566 * This function returns pointer to next response iocb entry
567 * in the response ring. The caller must hold hbalock to make sure
568 * that no other thread consume the next response iocb.
569 * SLI-2/SLI-3 provide different sized iocbs.
570 **/
James Smarted957682007-06-17 19:56:37 -0500571static inline IOCB_t *
572lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
573{
James Smart7e56aa22012-08-03 12:35:34 -0400574 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
575 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
James Smarted957682007-06-17 19:56:37 -0500576}
577
James Smarte59058c2008-08-24 21:49:00 -0400578/**
James Smart3621a712009-04-06 18:47:14 -0400579 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
James Smarte59058c2008-08-24 21:49:00 -0400580 * @phba: Pointer to HBA context object.
581 *
582 * This function is called with hbalock held. This function
583 * allocates a new driver iocb object from the iocb pool. If the
584 * allocation is successful, it returns pointer to the newly
585 * allocated iocb object else it returns NULL.
586 **/
James Smart4f2e66c2012-05-09 21:17:07 -0400587struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -0500588__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400589{
590 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
591 struct lpfc_iocbq * iocbq = NULL;
592
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +0100593 lockdep_assert_held(&phba->hbalock);
594
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400595 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
James Smart2a9bf3d2010-06-07 15:24:45 -0400596 if (iocbq)
597 phba->iocb_cnt++;
598 if (phba->iocb_cnt > phba->iocb_max)
599 phba->iocb_max = phba->iocb_cnt;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400600 return iocbq;
601}
602
James Smarte59058c2008-08-24 21:49:00 -0400603/**
James Smartda0436e2009-05-22 14:51:39 -0400604 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
605 * @phba: Pointer to HBA context object.
606 * @xritag: XRI value.
607 *
608 * This function clears the sglq pointer from the array of acive
609 * sglq's. The xritag that is passed in is used to index into the
610 * array. Before the xritag can be used it needs to be adjusted
611 * by subtracting the xribase.
612 *
613 * Returns sglq ponter = success, NULL = Failure.
614 **/
James Smart895427b2017-02-12 13:52:30 -0800615struct lpfc_sglq *
James Smartda0436e2009-05-22 14:51:39 -0400616__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
617{
James Smartda0436e2009-05-22 14:51:39 -0400618 struct lpfc_sglq *sglq;
James Smart6d368e52011-05-24 11:44:12 -0400619
620 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
621 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
James Smartda0436e2009-05-22 14:51:39 -0400622 return sglq;
623}
624
625/**
626 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
627 * @phba: Pointer to HBA context object.
628 * @xritag: XRI value.
629 *
630 * This function returns the sglq pointer from the array of acive
631 * sglq's. The xritag that is passed in is used to index into the
632 * array. Before the xritag can be used it needs to be adjusted
633 * by subtracting the xribase.
634 *
635 * Returns sglq ponter = success, NULL = Failure.
636 **/
James Smart0f65ff62010-02-26 14:14:23 -0500637struct lpfc_sglq *
James Smartda0436e2009-05-22 14:51:39 -0400638__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
639{
James Smartda0436e2009-05-22 14:51:39 -0400640 struct lpfc_sglq *sglq;
James Smart6d368e52011-05-24 11:44:12 -0400641
642 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
James Smartda0436e2009-05-22 14:51:39 -0400643 return sglq;
644}
645
646/**
James Smart1151e3e2011-02-16 12:39:35 -0500647 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
James Smart19ca7602010-11-20 23:11:55 -0500648 * @phba: Pointer to HBA context object.
649 * @xritag: xri used in this exchange.
650 * @rrq: The RRQ to be cleared.
651 *
James Smart19ca7602010-11-20 23:11:55 -0500652 **/
James Smart1151e3e2011-02-16 12:39:35 -0500653void
654lpfc_clr_rrq_active(struct lpfc_hba *phba,
655 uint16_t xritag,
656 struct lpfc_node_rrq *rrq)
James Smart19ca7602010-11-20 23:11:55 -0500657{
James Smart1151e3e2011-02-16 12:39:35 -0500658 struct lpfc_nodelist *ndlp = NULL;
James Smart19ca7602010-11-20 23:11:55 -0500659
James Smart1151e3e2011-02-16 12:39:35 -0500660 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
661 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
James Smart19ca7602010-11-20 23:11:55 -0500662
663 /* The target DID could have been swapped (cable swap)
664 * we should use the ndlp from the findnode if it is
665 * available.
666 */
James Smart1151e3e2011-02-16 12:39:35 -0500667 if ((!ndlp) && rrq->ndlp)
James Smart19ca7602010-11-20 23:11:55 -0500668 ndlp = rrq->ndlp;
669
James Smart1151e3e2011-02-16 12:39:35 -0500670 if (!ndlp)
671 goto out;
672
James Smartcff261f2013-12-17 20:29:47 -0500673 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
James Smart19ca7602010-11-20 23:11:55 -0500674 rrq->send_rrq = 0;
675 rrq->xritag = 0;
676 rrq->rrq_stop_time = 0;
677 }
James Smart1151e3e2011-02-16 12:39:35 -0500678out:
James Smart19ca7602010-11-20 23:11:55 -0500679 mempool_free(rrq, phba->rrq_pool);
680}
681
682/**
683 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
684 * @phba: Pointer to HBA context object.
685 *
686 * This function is called with hbalock held. This function
687 * Checks if stop_time (ratov from setting rrq active) has
688 * been reached, if it has and the send_rrq flag is set then
689 * it will call lpfc_send_rrq. If the send_rrq flag is not set
690 * then it will just call the routine to clear the rrq and
691 * free the rrq resource.
692 * The timer is set to the next rrq that is going to expire before
693 * leaving the routine.
694 *
695 **/
696void
697lpfc_handle_rrq_active(struct lpfc_hba *phba)
698{
699 struct lpfc_node_rrq *rrq;
700 struct lpfc_node_rrq *nextrrq;
701 unsigned long next_time;
702 unsigned long iflags;
James Smart1151e3e2011-02-16 12:39:35 -0500703 LIST_HEAD(send_rrq);
James Smart19ca7602010-11-20 23:11:55 -0500704
705 spin_lock_irqsave(&phba->hbalock, iflags);
706 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
James Smart256ec0d2013-04-17 20:14:58 -0400707 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
James Smart19ca7602010-11-20 23:11:55 -0500708 list_for_each_entry_safe(rrq, nextrrq,
James Smart1151e3e2011-02-16 12:39:35 -0500709 &phba->active_rrq_list, list) {
710 if (time_after(jiffies, rrq->rrq_stop_time))
711 list_move(&rrq->list, &send_rrq);
712 else if (time_before(rrq->rrq_stop_time, next_time))
James Smart19ca7602010-11-20 23:11:55 -0500713 next_time = rrq->rrq_stop_time;
714 }
715 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart06918ac2014-02-20 09:57:57 -0500716 if ((!list_empty(&phba->active_rrq_list)) &&
717 (!(phba->pport->load_flag & FC_UNLOADING)))
James Smart19ca7602010-11-20 23:11:55 -0500718 mod_timer(&phba->rrq_tmr, next_time);
James Smart1151e3e2011-02-16 12:39:35 -0500719 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
720 list_del(&rrq->list);
721 if (!rrq->send_rrq)
722 /* this call will free the rrq */
723 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
724 else if (lpfc_send_rrq(phba, rrq)) {
725 /* if we send the rrq then the completion handler
726 * will clear the bit in the xribitmap.
727 */
728 lpfc_clr_rrq_active(phba, rrq->xritag,
729 rrq);
730 }
731 }
James Smart19ca7602010-11-20 23:11:55 -0500732}
733
734/**
735 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
736 * @vport: Pointer to vport context object.
737 * @xri: The xri used in the exchange.
738 * @did: The targets DID for this exchange.
739 *
740 * returns NULL = rrq not found in the phba->active_rrq_list.
741 * rrq = rrq for this xri and target.
742 **/
743struct lpfc_node_rrq *
744lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
745{
746 struct lpfc_hba *phba = vport->phba;
747 struct lpfc_node_rrq *rrq;
748 struct lpfc_node_rrq *nextrrq;
749 unsigned long iflags;
750
751 if (phba->sli_rev != LPFC_SLI_REV4)
752 return NULL;
753 spin_lock_irqsave(&phba->hbalock, iflags);
754 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
755 if (rrq->vport == vport && rrq->xritag == xri &&
756 rrq->nlp_DID == did){
757 list_del(&rrq->list);
758 spin_unlock_irqrestore(&phba->hbalock, iflags);
759 return rrq;
760 }
761 }
762 spin_unlock_irqrestore(&phba->hbalock, iflags);
763 return NULL;
764}
765
766/**
767 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
768 * @vport: Pointer to vport context object.
James Smart1151e3e2011-02-16 12:39:35 -0500769 * @ndlp: Pointer to the lpfc_node_list structure.
770 * If ndlp is NULL Remove all active RRQs for this vport from the
771 * phba->active_rrq_list and clear the rrq.
772 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
James Smart19ca7602010-11-20 23:11:55 -0500773 **/
774void
James Smart1151e3e2011-02-16 12:39:35 -0500775lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
James Smart19ca7602010-11-20 23:11:55 -0500776
777{
778 struct lpfc_hba *phba = vport->phba;
779 struct lpfc_node_rrq *rrq;
780 struct lpfc_node_rrq *nextrrq;
781 unsigned long iflags;
James Smart1151e3e2011-02-16 12:39:35 -0500782 LIST_HEAD(rrq_list);
James Smart19ca7602010-11-20 23:11:55 -0500783
784 if (phba->sli_rev != LPFC_SLI_REV4)
785 return;
James Smart1151e3e2011-02-16 12:39:35 -0500786 if (!ndlp) {
787 lpfc_sli4_vport_delete_els_xri_aborted(vport);
788 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
James Smart19ca7602010-11-20 23:11:55 -0500789 }
James Smart1151e3e2011-02-16 12:39:35 -0500790 spin_lock_irqsave(&phba->hbalock, iflags);
791 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
792 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
793 list_move(&rrq->list, &rrq_list);
James Smart19ca7602010-11-20 23:11:55 -0500794 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart1151e3e2011-02-16 12:39:35 -0500795
796 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
797 list_del(&rrq->list);
798 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
799 }
James Smart19ca7602010-11-20 23:11:55 -0500800}
801
802/**
James Smart1151e3e2011-02-16 12:39:35 -0500803 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
James Smart19ca7602010-11-20 23:11:55 -0500804 * @phba: Pointer to HBA context object.
805 * @ndlp: Targets nodelist pointer for this exchange.
806 * @xritag the xri in the bitmap to test.
807 *
808 * This function is called with hbalock held. This function
809 * returns 0 = rrq not active for this xri
810 * 1 = rrq is valid for this xri.
811 **/
James Smart1151e3e2011-02-16 12:39:35 -0500812int
813lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
James Smart19ca7602010-11-20 23:11:55 -0500814 uint16_t xritag)
815{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +0100816 lockdep_assert_held(&phba->hbalock);
James Smart19ca7602010-11-20 23:11:55 -0500817 if (!ndlp)
818 return 0;
James Smartcff261f2013-12-17 20:29:47 -0500819 if (!ndlp->active_rrqs_xri_bitmap)
820 return 0;
821 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
James Smart19ca7602010-11-20 23:11:55 -0500822 return 1;
823 else
824 return 0;
825}
826
827/**
828 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
829 * @phba: Pointer to HBA context object.
830 * @ndlp: nodelist pointer for this target.
831 * @xritag: xri used in this exchange.
832 * @rxid: Remote Exchange ID.
833 * @send_rrq: Flag used to determine if we should send rrq els cmd.
834 *
835 * This function takes the hbalock.
836 * The active bit is always set in the active rrq xri_bitmap even
837 * if there is no slot avaiable for the other rrq information.
838 *
839 * returns 0 rrq actived for this xri
840 * < 0 No memory or invalid ndlp.
841 **/
842int
843lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
James Smartb42c07c2012-01-18 16:25:55 -0500844 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
James Smart19ca7602010-11-20 23:11:55 -0500845{
James Smart19ca7602010-11-20 23:11:55 -0500846 unsigned long iflags;
James Smartb42c07c2012-01-18 16:25:55 -0500847 struct lpfc_node_rrq *rrq;
848 int empty;
849
850 if (!ndlp)
851 return -EINVAL;
852
853 if (!phba->cfg_enable_rrq)
854 return -EINVAL;
James Smart19ca7602010-11-20 23:11:55 -0500855
856 spin_lock_irqsave(&phba->hbalock, iflags);
James Smartb42c07c2012-01-18 16:25:55 -0500857 if (phba->pport->load_flag & FC_UNLOADING) {
858 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
859 goto out;
860 }
861
862 /*
863 * set the active bit even if there is no mem available.
864 */
865 if (NLP_CHK_FREE_REQ(ndlp))
866 goto out;
867
868 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
869 goto out;
870
James Smartcff261f2013-12-17 20:29:47 -0500871 if (!ndlp->active_rrqs_xri_bitmap)
872 goto out;
873
874 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
James Smartb42c07c2012-01-18 16:25:55 -0500875 goto out;
876
James Smart19ca7602010-11-20 23:11:55 -0500877 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smartb42c07c2012-01-18 16:25:55 -0500878 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
879 if (!rrq) {
880 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
881 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
882 " DID:0x%x Send:%d\n",
883 xritag, rxid, ndlp->nlp_DID, send_rrq);
884 return -EINVAL;
885 }
James Smarte5771b42013-03-01 16:37:14 -0500886 if (phba->cfg_enable_rrq == 1)
887 rrq->send_rrq = send_rrq;
888 else
889 rrq->send_rrq = 0;
James Smartb42c07c2012-01-18 16:25:55 -0500890 rrq->xritag = xritag;
James Smart256ec0d2013-04-17 20:14:58 -0400891 rrq->rrq_stop_time = jiffies +
892 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
James Smartb42c07c2012-01-18 16:25:55 -0500893 rrq->ndlp = ndlp;
894 rrq->nlp_DID = ndlp->nlp_DID;
895 rrq->vport = ndlp->vport;
896 rrq->rxid = rxid;
James Smartb42c07c2012-01-18 16:25:55 -0500897 spin_lock_irqsave(&phba->hbalock, iflags);
898 empty = list_empty(&phba->active_rrq_list);
899 list_add_tail(&rrq->list, &phba->active_rrq_list);
900 phba->hba_flag |= HBA_RRQ_ACTIVE;
901 if (empty)
902 lpfc_worker_wake_up(phba);
903 spin_unlock_irqrestore(&phba->hbalock, iflags);
904 return 0;
905out:
906 spin_unlock_irqrestore(&phba->hbalock, iflags);
907 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
908 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
909 " DID:0x%x Send:%d\n",
910 xritag, rxid, ndlp->nlp_DID, send_rrq);
911 return -EINVAL;
James Smart19ca7602010-11-20 23:11:55 -0500912}
913
914/**
James Smart895427b2017-02-12 13:52:30 -0800915 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
James Smartda0436e2009-05-22 14:51:39 -0400916 * @phba: Pointer to HBA context object.
James Smart19ca7602010-11-20 23:11:55 -0500917 * @piocb: Pointer to the iocbq.
James Smartda0436e2009-05-22 14:51:39 -0400918 *
James Smartdafe8ce2014-09-03 12:56:40 -0400919 * This function is called with the ring lock held. This function
James Smart6d368e52011-05-24 11:44:12 -0400920 * gets a new driver sglq object from the sglq list. If the
James Smartda0436e2009-05-22 14:51:39 -0400921 * list is not empty then it is successful, it returns pointer to the newly
922 * allocated sglq object else it returns NULL.
923 **/
924static struct lpfc_sglq *
James Smart895427b2017-02-12 13:52:30 -0800925__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
James Smartda0436e2009-05-22 14:51:39 -0400926{
James Smart895427b2017-02-12 13:52:30 -0800927 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
James Smartda0436e2009-05-22 14:51:39 -0400928 struct lpfc_sglq *sglq = NULL;
James Smart19ca7602010-11-20 23:11:55 -0500929 struct lpfc_sglq *start_sglq = NULL;
James Smart19ca7602010-11-20 23:11:55 -0500930 struct lpfc_scsi_buf *lpfc_cmd;
931 struct lpfc_nodelist *ndlp;
932 int found = 0;
933
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +0100934 lockdep_assert_held(&phba->hbalock);
935
James Smart19ca7602010-11-20 23:11:55 -0500936 if (piocbq->iocb_flag & LPFC_IO_FCP) {
937 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
938 ndlp = lpfc_cmd->rdata->pnode;
James Smartbe858b62010-12-15 17:57:20 -0500939 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
James Smart6c7cf482015-04-07 15:07:25 -0400940 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
James Smart19ca7602010-11-20 23:11:55 -0500941 ndlp = piocbq->context_un.ndlp;
James Smart6c7cf482015-04-07 15:07:25 -0400942 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
943 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
944 ndlp = NULL;
945 else
946 ndlp = piocbq->context_un.ndlp;
947 } else {
James Smart19ca7602010-11-20 23:11:55 -0500948 ndlp = piocbq->context1;
James Smart6c7cf482015-04-07 15:07:25 -0400949 }
James Smart19ca7602010-11-20 23:11:55 -0500950
James Smart895427b2017-02-12 13:52:30 -0800951 spin_lock(&phba->sli4_hba.sgl_list_lock);
952 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
James Smart19ca7602010-11-20 23:11:55 -0500953 start_sglq = sglq;
954 while (!found) {
955 if (!sglq)
James Smartd11f54b2017-03-04 09:30:24 -0800956 break;
James Smart895427b2017-02-12 13:52:30 -0800957 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
958 test_bit(sglq->sli4_lxritag,
959 ndlp->active_rrqs_xri_bitmap)) {
James Smart19ca7602010-11-20 23:11:55 -0500960 /* This xri has an rrq outstanding for this DID.
961 * put it back in the list and get another xri.
962 */
James Smart895427b2017-02-12 13:52:30 -0800963 list_add_tail(&sglq->list, lpfc_els_sgl_list);
James Smart19ca7602010-11-20 23:11:55 -0500964 sglq = NULL;
James Smart895427b2017-02-12 13:52:30 -0800965 list_remove_head(lpfc_els_sgl_list, sglq,
James Smart19ca7602010-11-20 23:11:55 -0500966 struct lpfc_sglq, list);
967 if (sglq == start_sglq) {
968 sglq = NULL;
969 break;
970 } else
971 continue;
972 }
973 sglq->ndlp = ndlp;
974 found = 1;
James Smart6d368e52011-05-24 11:44:12 -0400975 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
James Smart19ca7602010-11-20 23:11:55 -0500976 sglq->state = SGL_ALLOCATED;
977 }
James Smart895427b2017-02-12 13:52:30 -0800978 spin_unlock(&phba->sli4_hba.sgl_list_lock);
James Smartda0436e2009-05-22 14:51:39 -0400979 return sglq;
980}
981
982/**
James Smartf358dd02017-02-12 13:52:34 -0800983 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
984 * @phba: Pointer to HBA context object.
985 * @piocb: Pointer to the iocbq.
986 *
987 * This function is called with the sgl_list lock held. This function
988 * gets a new driver sglq object from the sglq list. If the
989 * list is not empty then it is successful, it returns pointer to the newly
990 * allocated sglq object else it returns NULL.
991 **/
992struct lpfc_sglq *
993__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
994{
995 struct list_head *lpfc_nvmet_sgl_list;
996 struct lpfc_sglq *sglq = NULL;
997
998 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
999
1000 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1001
1002 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1003 if (!sglq)
1004 return NULL;
1005 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1006 sglq->state = SGL_ALLOCATED;
dea31012005-04-17 16:05:31 -05001007 return sglq;
1008}
1009
James Smarte59058c2008-08-24 21:49:00 -04001010/**
James Smart3621a712009-04-06 18:47:14 -04001011 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001012 * @phba: Pointer to HBA context object.
1013 *
1014 * This function is called with no lock held. This function
1015 * allocates a new driver iocb object from the iocb pool. If the
1016 * allocation is successful, it returns pointer to the newly
1017 * allocated iocb object else it returns NULL.
1018 **/
James Smart2e0fef82007-06-17 19:56:36 -05001019struct lpfc_iocbq *
1020lpfc_sli_get_iocbq(struct lpfc_hba *phba)
James Bottomley604a3e32005-10-29 10:28:33 -05001021{
James Smart2e0fef82007-06-17 19:56:36 -05001022 struct lpfc_iocbq * iocbq = NULL;
1023 unsigned long iflags;
1024
1025 spin_lock_irqsave(&phba->hbalock, iflags);
1026 iocbq = __lpfc_sli_get_iocbq(phba);
1027 spin_unlock_irqrestore(&phba->hbalock, iflags);
1028 return iocbq;
1029}
1030
James Smarte59058c2008-08-24 21:49:00 -04001031/**
James Smart4f774512009-05-22 14:52:35 -04001032 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1033 * @phba: Pointer to HBA context object.
1034 * @iocbq: Pointer to driver iocb object.
1035 *
1036 * This function is called with hbalock held to release driver
1037 * iocb object to the iocb pool. The iotag in the iocb object
1038 * does not change for each use of the iocb object. This function
1039 * clears all other fields of the iocb object when it is freed.
1040 * The sqlq structure that holds the xritag and phys and virtual
1041 * mappings for the scatter gather list is retrieved from the
1042 * active array of sglq. The get of the sglq pointer also clears
1043 * the entry in the array. If the status of the IO indiactes that
1044 * this IO was aborted then the sglq entry it put on the
1045 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1046 * IO has good status or fails for any other reason then the sglq
James Smart895427b2017-02-12 13:52:30 -08001047 * entry is added to the free list (lpfc_els_sgl_list).
James Smart4f774512009-05-22 14:52:35 -04001048 **/
1049static void
1050__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1051{
1052 struct lpfc_sglq *sglq;
1053 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
James Smart2a9bf3d2010-06-07 15:24:45 -04001054 unsigned long iflag = 0;
James Smart895427b2017-02-12 13:52:30 -08001055 struct lpfc_sli_ring *pring;
James Smart4f774512009-05-22 14:52:35 -04001056
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001057 lockdep_assert_held(&phba->hbalock);
1058
James Smart4f774512009-05-22 14:52:35 -04001059 if (iocbq->sli4_xritag == NO_XRI)
1060 sglq = NULL;
1061 else
James Smart6d368e52011-05-24 11:44:12 -04001062 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1063
James Smart0e9bb8d2013-03-01 16:35:12 -05001064
James Smart4f774512009-05-22 14:52:35 -04001065 if (sglq) {
James Smartf358dd02017-02-12 13:52:34 -08001066 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1067 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1068 iflag);
James Smart0f65ff62010-02-26 14:14:23 -05001069 sglq->state = SGL_FREED;
James Smart19ca7602010-11-20 23:11:55 -05001070 sglq->ndlp = NULL;
James Smartfedd3b72011-02-16 12:39:24 -05001071 list_add_tail(&sglq->list,
James Smartf358dd02017-02-12 13:52:34 -08001072 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1073 spin_unlock_irqrestore(
1074 &phba->sli4_hba.sgl_list_lock, iflag);
1075 goto out;
1076 }
1077
James Smart895427b2017-02-12 13:52:30 -08001078 pring = phba->sli4_hba.els_wq->pring;
James Smart4f774512009-05-22 14:52:35 -04001079 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1080 (sglq->state != SGL_XRI_ABORTED)) {
James Smart895427b2017-02-12 13:52:30 -08001081 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1082 iflag);
James Smart341af102010-01-26 23:07:37 -05001083 list_add(&sglq->list,
James Smart895427b2017-02-12 13:52:30 -08001084 &phba->sli4_hba.lpfc_abts_els_sgl_list);
James Smart4f774512009-05-22 14:52:35 -04001085 spin_unlock_irqrestore(
James Smart895427b2017-02-12 13:52:30 -08001086 &phba->sli4_hba.sgl_list_lock, iflag);
James Smart4f774512009-05-22 14:52:35 -04001087 } else {
James Smart895427b2017-02-12 13:52:30 -08001088 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1089 iflag);
James Smart4f774512009-05-22 14:52:35 -04001090 sglq->state = SGL_FREED;
1091 sglq->ndlp = NULL;
James Smartfedd3b72011-02-16 12:39:24 -05001092 list_add_tail(&sglq->list,
James Smart895427b2017-02-12 13:52:30 -08001093 &phba->sli4_hba.lpfc_els_sgl_list);
1094 spin_unlock_irqrestore(
1095 &phba->sli4_hba.sgl_list_lock, iflag);
James Smart2a9bf3d2010-06-07 15:24:45 -04001096
1097 /* Check if TXQ queue needs to be serviced */
James Smart0e9bb8d2013-03-01 16:35:12 -05001098 if (!list_empty(&pring->txq))
James Smart2a9bf3d2010-06-07 15:24:45 -04001099 lpfc_worker_wake_up(phba);
James Smart0f65ff62010-02-26 14:14:23 -05001100 }
James Smart4f774512009-05-22 14:52:35 -04001101 }
1102
James Smartf358dd02017-02-12 13:52:34 -08001103out:
James Smart4f774512009-05-22 14:52:35 -04001104 /*
1105 * Clean all volatile data fields, preserve iotag and node struct.
1106 */
1107 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
James Smart6d368e52011-05-24 11:44:12 -04001108 iocbq->sli4_lxritag = NO_XRI;
James Smart4f774512009-05-22 14:52:35 -04001109 iocbq->sli4_xritag = NO_XRI;
James Smartf358dd02017-02-12 13:52:34 -08001110 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1111 LPFC_IO_NVME_LS);
James Smart4f774512009-05-22 14:52:35 -04001112 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1113}
1114
James Smart2a9bf3d2010-06-07 15:24:45 -04001115
James Smart4f774512009-05-22 14:52:35 -04001116/**
James Smart3772a992009-05-22 14:50:54 -04001117 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1118 * @phba: Pointer to HBA context object.
1119 * @iocbq: Pointer to driver iocb object.
1120 *
1121 * This function is called with hbalock held to release driver
1122 * iocb object to the iocb pool. The iotag in the iocb object
1123 * does not change for each use of the iocb object. This function
1124 * clears all other fields of the iocb object when it is freed.
1125 **/
1126static void
1127__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1128{
1129 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1130
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001131 lockdep_assert_held(&phba->hbalock);
James Smart0e9bb8d2013-03-01 16:35:12 -05001132
1133 /*
James Smart3772a992009-05-22 14:50:54 -04001134 * Clean all volatile data fields, preserve iotag and node struct.
1135 */
1136 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1137 iocbq->sli4_xritag = NO_XRI;
1138 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1139}
1140
1141/**
James Smart3621a712009-04-06 18:47:14 -04001142 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001143 * @phba: Pointer to HBA context object.
1144 * @iocbq: Pointer to driver iocb object.
1145 *
1146 * This function is called with hbalock held to release driver
1147 * iocb object to the iocb pool. The iotag in the iocb object
1148 * does not change for each use of the iocb object. This function
1149 * clears all other fields of the iocb object when it is freed.
1150 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001151static void
James Smart2e0fef82007-06-17 19:56:36 -05001152__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1153{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001154 lockdep_assert_held(&phba->hbalock);
1155
James Smart3772a992009-05-22 14:50:54 -04001156 phba->__lpfc_sli_release_iocbq(phba, iocbq);
James Smart2a9bf3d2010-06-07 15:24:45 -04001157 phba->iocb_cnt--;
James Bottomley604a3e32005-10-29 10:28:33 -05001158}
1159
James Smarte59058c2008-08-24 21:49:00 -04001160/**
James Smart3621a712009-04-06 18:47:14 -04001161 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001162 * @phba: Pointer to HBA context object.
1163 * @iocbq: Pointer to driver iocb object.
1164 *
1165 * This function is called with no lock held to release the iocb to
1166 * iocb pool.
1167 **/
James Smart2e0fef82007-06-17 19:56:36 -05001168void
1169lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1170{
1171 unsigned long iflags;
1172
1173 /*
1174 * Clean all volatile data fields, preserve iotag and node struct.
1175 */
1176 spin_lock_irqsave(&phba->hbalock, iflags);
1177 __lpfc_sli_release_iocbq(phba, iocbq);
1178 spin_unlock_irqrestore(&phba->hbalock, iflags);
1179}
1180
James Smarte59058c2008-08-24 21:49:00 -04001181/**
James Smarta257bf92009-04-06 18:48:10 -04001182 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1183 * @phba: Pointer to HBA context object.
1184 * @iocblist: List of IOCBs.
1185 * @ulpstatus: ULP status in IOCB command field.
1186 * @ulpWord4: ULP word-4 in IOCB command field.
1187 *
1188 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1189 * on the list by invoking the complete callback function associated with the
1190 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1191 * fields.
1192 **/
1193void
1194lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1195 uint32_t ulpstatus, uint32_t ulpWord4)
1196{
1197 struct lpfc_iocbq *piocb;
1198
1199 while (!list_empty(iocblist)) {
1200 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
James Smarta257bf92009-04-06 18:48:10 -04001201 if (!piocb->iocb_cmpl)
1202 lpfc_sli_release_iocbq(phba, piocb);
1203 else {
1204 piocb->iocb.ulpStatus = ulpstatus;
1205 piocb->iocb.un.ulpWord[4] = ulpWord4;
1206 (piocb->iocb_cmpl) (phba, piocb, piocb);
1207 }
1208 }
1209 return;
1210}
1211
1212/**
James Smart3621a712009-04-06 18:47:14 -04001213 * lpfc_sli_iocb_cmd_type - Get the iocb type
1214 * @iocb_cmnd: iocb command code.
James Smarte59058c2008-08-24 21:49:00 -04001215 *
1216 * This function is called by ring event handler function to get the iocb type.
1217 * This function translates the iocb command to an iocb command type used to
1218 * decide the final disposition of each completed IOCB.
1219 * The function returns
1220 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1221 * LPFC_SOL_IOCB if it is a solicited iocb completion
1222 * LPFC_ABORT_IOCB if it is an abort iocb
1223 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1224 *
1225 * The caller is not required to hold any lock.
1226 **/
dea31012005-04-17 16:05:31 -05001227static lpfc_iocb_type
1228lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1229{
1230 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1231
1232 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1233 return 0;
1234
1235 switch (iocb_cmnd) {
1236 case CMD_XMIT_SEQUENCE_CR:
1237 case CMD_XMIT_SEQUENCE_CX:
1238 case CMD_XMIT_BCAST_CN:
1239 case CMD_XMIT_BCAST_CX:
1240 case CMD_ELS_REQUEST_CR:
1241 case CMD_ELS_REQUEST_CX:
1242 case CMD_CREATE_XRI_CR:
1243 case CMD_CREATE_XRI_CX:
1244 case CMD_GET_RPI_CN:
1245 case CMD_XMIT_ELS_RSP_CX:
1246 case CMD_GET_RPI_CR:
1247 case CMD_FCP_IWRITE_CR:
1248 case CMD_FCP_IWRITE_CX:
1249 case CMD_FCP_IREAD_CR:
1250 case CMD_FCP_IREAD_CX:
1251 case CMD_FCP_ICMND_CR:
1252 case CMD_FCP_ICMND_CX:
James Smartf5603512006-12-02 13:35:43 -05001253 case CMD_FCP_TSEND_CX:
1254 case CMD_FCP_TRSP_CX:
1255 case CMD_FCP_TRECEIVE_CX:
1256 case CMD_FCP_AUTO_TRSP_CX:
dea31012005-04-17 16:05:31 -05001257 case CMD_ADAPTER_MSG:
1258 case CMD_ADAPTER_DUMP:
1259 case CMD_XMIT_SEQUENCE64_CR:
1260 case CMD_XMIT_SEQUENCE64_CX:
1261 case CMD_XMIT_BCAST64_CN:
1262 case CMD_XMIT_BCAST64_CX:
1263 case CMD_ELS_REQUEST64_CR:
1264 case CMD_ELS_REQUEST64_CX:
1265 case CMD_FCP_IWRITE64_CR:
1266 case CMD_FCP_IWRITE64_CX:
1267 case CMD_FCP_IREAD64_CR:
1268 case CMD_FCP_IREAD64_CX:
1269 case CMD_FCP_ICMND64_CR:
1270 case CMD_FCP_ICMND64_CX:
James Smartf5603512006-12-02 13:35:43 -05001271 case CMD_FCP_TSEND64_CX:
1272 case CMD_FCP_TRSP64_CX:
1273 case CMD_FCP_TRECEIVE64_CX:
dea31012005-04-17 16:05:31 -05001274 case CMD_GEN_REQUEST64_CR:
1275 case CMD_GEN_REQUEST64_CX:
1276 case CMD_XMIT_ELS_RSP64_CX:
James Smartda0436e2009-05-22 14:51:39 -04001277 case DSSCMD_IWRITE64_CR:
1278 case DSSCMD_IWRITE64_CX:
1279 case DSSCMD_IREAD64_CR:
1280 case DSSCMD_IREAD64_CX:
dea31012005-04-17 16:05:31 -05001281 type = LPFC_SOL_IOCB;
1282 break;
1283 case CMD_ABORT_XRI_CN:
1284 case CMD_ABORT_XRI_CX:
1285 case CMD_CLOSE_XRI_CN:
1286 case CMD_CLOSE_XRI_CX:
1287 case CMD_XRI_ABORTED_CX:
1288 case CMD_ABORT_MXRI64_CN:
James Smart6669f9b2009-10-02 15:16:45 -04001289 case CMD_XMIT_BLS_RSP64_CX:
dea31012005-04-17 16:05:31 -05001290 type = LPFC_ABORT_IOCB;
1291 break;
1292 case CMD_RCV_SEQUENCE_CX:
1293 case CMD_RCV_ELS_REQ_CX:
1294 case CMD_RCV_SEQUENCE64_CX:
1295 case CMD_RCV_ELS_REQ64_CX:
James Smart57127f12007-10-27 13:37:05 -04001296 case CMD_ASYNC_STATUS:
James Smarted957682007-06-17 19:56:37 -05001297 case CMD_IOCB_RCV_SEQ64_CX:
1298 case CMD_IOCB_RCV_ELS64_CX:
1299 case CMD_IOCB_RCV_CONT64_CX:
James Smart3163f722008-02-08 18:50:25 -05001300 case CMD_IOCB_RET_XRI64_CX:
dea31012005-04-17 16:05:31 -05001301 type = LPFC_UNSOL_IOCB;
1302 break;
James Smart3163f722008-02-08 18:50:25 -05001303 case CMD_IOCB_XMIT_MSEQ64_CR:
1304 case CMD_IOCB_XMIT_MSEQ64_CX:
1305 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1306 case CMD_IOCB_RCV_ELS_LIST64_CX:
1307 case CMD_IOCB_CLOSE_EXTENDED_CN:
1308 case CMD_IOCB_ABORT_EXTENDED_CN:
1309 case CMD_IOCB_RET_HBQE64_CN:
1310 case CMD_IOCB_FCP_IBIDIR64_CR:
1311 case CMD_IOCB_FCP_IBIDIR64_CX:
1312 case CMD_IOCB_FCP_ITASKMGT64_CX:
1313 case CMD_IOCB_LOGENTRY_CN:
1314 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1315 printk("%s - Unhandled SLI-3 Command x%x\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07001316 __func__, iocb_cmnd);
James Smart3163f722008-02-08 18:50:25 -05001317 type = LPFC_UNKNOWN_IOCB;
1318 break;
dea31012005-04-17 16:05:31 -05001319 default:
1320 type = LPFC_UNKNOWN_IOCB;
1321 break;
1322 }
1323
1324 return type;
1325}
1326
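/*
 * Editor's usage sketch (not driver code): the SLI-3 response-ring
 * handlers classify each completion before deciding how to finish it.
 * CMD_IOCB_MASK is assumed to be the usual low-byte command mask; the
 * actions in parentheses are placeholders for the real handling.
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:	(match against txcmplq, call iocb_cmpl)
 *	case LPFC_UNSOL_IOCB:	(hand off to the unsolicited handler)
 *	case LPFC_ABORT_IOCB:	(complete the aborted command)
 *	default:		(LPFC_UNKNOWN_IOCB - log and drop)
 *	}
 */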
James Smarte59058c2008-08-24 21:49:00 -04001327/**
James Smart3621a712009-04-06 18:47:14 -04001328 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
James Smarte59058c2008-08-24 21:49:00 -04001329 * @phba: Pointer to HBA context object.
1330 *
1331 * This function is called from SLI initialization code
1332 * to configure every ring of the HBA's SLI interface. The
1333 * caller is not required to hold any lock. This function issues
1334 * a config_ring mailbox command for each ring.
1335 * This function returns zero if successful; otherwise it returns a negative
1336 * error code.
1337 **/
dea31012005-04-17 16:05:31 -05001338static int
James Smarted957682007-06-17 19:56:37 -05001339lpfc_sli_ring_map(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05001340{
1341 struct lpfc_sli *psli = &phba->sli;
James Smarted957682007-06-17 19:56:37 -05001342 LPFC_MBOXQ_t *pmb;
1343 MAILBOX_t *pmbox;
1344 int i, rc, ret = 0;
dea31012005-04-17 16:05:31 -05001345
James Smarted957682007-06-17 19:56:37 -05001346 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1347 if (!pmb)
1348 return -ENOMEM;
James Smart04c68492009-05-22 14:52:52 -04001349 pmbox = &pmb->u.mb;
James Smarted957682007-06-17 19:56:37 -05001350 phba->link_state = LPFC_INIT_MBX_CMDS;
dea31012005-04-17 16:05:31 -05001351 for (i = 0; i < psli->num_rings; i++) {
dea31012005-04-17 16:05:31 -05001352 lpfc_config_ring(phba, i, pmb);
1353 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1354 if (rc != MBX_SUCCESS) {
James Smart92d7f7b2007-06-17 19:56:38 -05001355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04001356 "0446 Adapter failed to init (%d), "
dea31012005-04-17 16:05:31 -05001357 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1358 "ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001359 rc, pmbox->mbxCommand,
1360 pmbox->mbxStatus, i);
James Smart2e0fef82007-06-17 19:56:36 -05001361 phba->link_state = LPFC_HBA_ERROR;
James Smarted957682007-06-17 19:56:37 -05001362 ret = -ENXIO;
1363 break;
dea31012005-04-17 16:05:31 -05001364 }
1365 }
James Smarted957682007-06-17 19:56:37 -05001366 mempool_free(pmb, phba->mbox_mem_pool);
1367 return ret;
dea31012005-04-17 16:05:31 -05001368}
1369
James Smarte59058c2008-08-24 21:49:00 -04001370/**
James Smart3621a712009-04-06 18:47:14 -04001371 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
James Smarte59058c2008-08-24 21:49:00 -04001372 * @phba: Pointer to HBA context object.
1373 * @pring: Pointer to driver SLI ring object.
1374 * @piocb: Pointer to the driver iocb object.
1375 *
1376 * This function is called with hbalock held. The function adds the
1377 * new iocb to the txcmplq of the given ring. This function always returns
1378 * 0. If this function is called for the ELS ring, this function checks if
1379 * there is a vport associated with the ELS command. This function also
1380 * starts the els_tmofunc timer if this is an ELS command.
1381 **/
dea31012005-04-17 16:05:31 -05001382static int
James Smart2e0fef82007-06-17 19:56:36 -05001383lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1384 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05001385{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001386 lockdep_assert_held(&phba->hbalock);
1387
Mauricio Faria de Oliveira2319f842016-11-23 10:33:19 -02001388 BUG_ON(!piocb);
Johannes Thumshirn22466da2016-07-29 15:30:56 +02001389
dea31012005-04-17 16:05:31 -05001390 list_add_tail(&piocb->list, &pring->txcmplq);
James Smart4f2e66c2012-05-09 21:17:07 -04001391 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
James Smart2a9bf3d2010-06-07 15:24:45 -04001392
James Smart92d7f7b2007-06-17 19:56:38 -05001393 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1394 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
Mauricio Faria de Oliveira2319f842016-11-23 10:33:19 -02001395 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1396 BUG_ON(!piocb->vport);
1397 if (!(piocb->vport->load_flag & FC_UNLOADING))
1398 mod_timer(&piocb->vport->els_tmofunc,
1399 jiffies +
1400 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1401 }
dea31012005-04-17 16:05:31 -05001402
James Smart2e0fef82007-06-17 19:56:36 -05001403 return 0;
dea31012005-04-17 16:05:31 -05001404}
1405
James Smarte59058c2008-08-24 21:49:00 -04001406/**
James Smart3621a712009-04-06 18:47:14 -04001407 * lpfc_sli_ringtx_get - Get first element of the txq
James Smarte59058c2008-08-24 21:49:00 -04001408 * @phba: Pointer to HBA context object.
1409 * @pring: Pointer to driver SLI ring object.
1410 *
1411 * This function is called with hbalock held to get the next
1412 * iocb in the txq of the given ring. If there is any iocb in
1413 * the txq, the function returns the first iocb in the list after
1414 * removing it from the list; otherwise it returns NULL.
1415 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04001416struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05001417lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001418{
dea31012005-04-17 16:05:31 -05001419 struct lpfc_iocbq *cmd_iocb;
1420
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001421 lockdep_assert_held(&phba->hbalock);
1422
James Smart858c9f62007-06-17 19:56:39 -05001423 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
James Smart2e0fef82007-06-17 19:56:36 -05001424 return cmd_iocb;
dea31012005-04-17 16:05:31 -05001425}
1426
James Smarte59058c2008-08-24 21:49:00 -04001427/**
James Smart3621a712009-04-06 18:47:14 -04001428 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
James Smarte59058c2008-08-24 21:49:00 -04001429 * @phba: Pointer to HBA context object.
1430 * @pring: Pointer to driver SLI ring object.
1431 *
1432 * This function is called with hbalock held and the caller must post the
1433 * iocb without releasing the lock. If the caller releases the lock,
1434 * iocb slot returned by the function is not guaranteed to be available.
1435 * The function returns a pointer to the next available iocb slot if there
1436 * is an available slot in the ring; otherwise it returns NULL.
1437 * If the get index of the ring is ahead of the put index, the function
1438 * will post an error attention event to the worker thread to take the
1439 * HBA to offline state.
1440 **/
dea31012005-04-17 16:05:31 -05001441static IOCB_t *
1442lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1443{
James Smart34b02dc2008-08-24 21:49:55 -04001444 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James Smart7e56aa22012-08-03 12:35:34 -04001445 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001446
1447 lockdep_assert_held(&phba->hbalock);
1448
James Smart7e56aa22012-08-03 12:35:34 -04001449 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1450 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1451 pring->sli.sli3.next_cmdidx = 0;
dea31012005-04-17 16:05:31 -05001452
James Smart7e56aa22012-08-03 12:35:34 -04001453 if (unlikely(pring->sli.sli3.local_getidx ==
1454 pring->sli.sli3.next_cmdidx)) {
dea31012005-04-17 16:05:31 -05001455
James Smart7e56aa22012-08-03 12:35:34 -04001456 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05001457
James Smart7e56aa22012-08-03 12:35:34 -04001458 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
dea31012005-04-17 16:05:31 -05001459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001460 "0315 Ring %d issue: portCmdGet %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02001461 "is bigger than cmd ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001462 pring->ringno,
James Smart7e56aa22012-08-03 12:35:34 -04001463 pring->sli.sli3.local_getidx,
1464 max_cmd_idx);
dea31012005-04-17 16:05:31 -05001465
James Smart2e0fef82007-06-17 19:56:36 -05001466 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05001467 /*
1468 * All error attention handlers are posted to
1469 * worker thread
1470 */
1471 phba->work_ha |= HA_ERATT;
1472 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05001473
James Smart5e9d9b82008-06-14 22:52:53 -04001474 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -05001475
1476 return NULL;
1477 }
1478
James Smart7e56aa22012-08-03 12:35:34 -04001479 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
dea31012005-04-17 16:05:31 -05001480 return NULL;
1481 }
1482
James Smarted957682007-06-17 19:56:37 -05001483 return lpfc_cmd_iocb(phba, pring);
dea31012005-04-17 16:05:31 -05001484}
1485
James Smarte59058c2008-08-24 21:49:00 -04001486/**
James Smart3621a712009-04-06 18:47:14 -04001487 * lpfc_sli_next_iotag - Get an iotag for the iocb
James Smarte59058c2008-08-24 21:49:00 -04001488 * @phba: Pointer to HBA context object.
1489 * @iocbq: Pointer to driver iocb object.
1490 *
1491 * This function gets an iotag for the iocb. If there is no unused iotag and
1492 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1493 * array and assigns a new iotag.
1494 * The function returns the allocated iotag if successful, else returns zero.
1495 * Zero is not a valid iotag.
1496 * The caller is not required to hold any lock.
1497 **/
James Bottomley604a3e32005-10-29 10:28:33 -05001498uint16_t
James Smart2e0fef82007-06-17 19:56:36 -05001499lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea31012005-04-17 16:05:31 -05001500{
James Smart2e0fef82007-06-17 19:56:36 -05001501 struct lpfc_iocbq **new_arr;
1502 struct lpfc_iocbq **old_arr;
James Bottomley604a3e32005-10-29 10:28:33 -05001503 size_t new_len;
1504 struct lpfc_sli *psli = &phba->sli;
1505 uint16_t iotag;
dea31012005-04-17 16:05:31 -05001506
James Smart2e0fef82007-06-17 19:56:36 -05001507 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001508 iotag = psli->last_iotag;
1509 if(++iotag < psli->iocbq_lookup_len) {
1510 psli->last_iotag = iotag;
1511 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001512 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001513 iocbq->iotag = iotag;
1514 return iotag;
James Smart2e0fef82007-06-17 19:56:36 -05001515 } else if (psli->iocbq_lookup_len < (0xffff
James Bottomley604a3e32005-10-29 10:28:33 -05001516 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1517 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
James Smart2e0fef82007-06-17 19:56:36 -05001518 spin_unlock_irq(&phba->hbalock);
1519 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
James Bottomley604a3e32005-10-29 10:28:33 -05001520 GFP_KERNEL);
1521 if (new_arr) {
James Smart2e0fef82007-06-17 19:56:36 -05001522 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001523 old_arr = psli->iocbq_lookup;
1524 if (new_len <= psli->iocbq_lookup_len) {
1525 /* highly improbable case */
1526 kfree(new_arr);
1527 iotag = psli->last_iotag;
1528 if(++iotag < psli->iocbq_lookup_len) {
1529 psli->last_iotag = iotag;
1530 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001531 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001532 iocbq->iotag = iotag;
1533 return iotag;
1534 }
James Smart2e0fef82007-06-17 19:56:36 -05001535 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001536 return 0;
1537 }
1538 if (psli->iocbq_lookup)
1539 memcpy(new_arr, old_arr,
1540 ((psli->last_iotag + 1) *
James Smart311464e2007-08-02 11:10:37 -04001541 sizeof (struct lpfc_iocbq *)));
James Bottomley604a3e32005-10-29 10:28:33 -05001542 psli->iocbq_lookup = new_arr;
1543 psli->iocbq_lookup_len = new_len;
1544 psli->last_iotag = iotag;
1545 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001546 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001547 iocbq->iotag = iotag;
1548 kfree(old_arr);
1549 return iotag;
1550 }
James Smart8f6d98d2006-08-01 07:34:00 -04001551 } else
James Smart2e0fef82007-06-17 19:56:36 -05001552 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05001553
James Smartbc739052010-08-04 16:11:18 -04001554 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001555 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1556 psli->last_iotag);
dea31012005-04-17 16:05:31 -05001557
James Bottomley604a3e32005-10-29 10:28:33 -05001558 return 0;
dea31012005-04-17 16:05:31 -05001559}
1560
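/*
 * Editor's usage sketch (not driver code): an iotag is assigned once, when
 * an iocbq is first added to the driver's pool, and then stays with that
 * iocbq for its lifetime (the release path above deliberately preserves
 * it).  A return of zero means no iotag could be assigned.
 *
 *	iocbq = kzalloc(sizeof(*iocbq), GFP_KERNEL);
 *	if (!iocbq)
 *		return -ENOMEM;
 *	if (!lpfc_sli_next_iotag(phba, iocbq)) {
 *		kfree(iocbq);
 *		return -ENOMEM;
 *	}
 */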
James Smarte59058c2008-08-24 21:49:00 -04001561/**
James Smart3621a712009-04-06 18:47:14 -04001562 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
James Smarte59058c2008-08-24 21:49:00 -04001563 * @phba: Pointer to HBA context object.
1564 * @pring: Pointer to driver SLI ring object.
1565 * @iocb: Pointer to iocb slot in the ring.
1566 * @nextiocb: Pointer to driver iocb object which needs to be
1567 * posted to firmware.
1568 *
1569 * This function is called with hbalock held to post a new iocb to
1570 * the firmware. This function copies the new iocb to ring iocb slot and
1571 * updates the ring pointers. It adds the new iocb to txcmplq if there is
1572 * a completion call back for this iocb else the function will free the
1573 * iocb object.
1574 **/
dea31012005-04-17 16:05:31 -05001575static void
1576lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1577 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1578{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001579 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05001580 /*
James Bottomley604a3e32005-10-29 10:28:33 -05001581 * Set up an iotag
dea31012005-04-17 16:05:31 -05001582 */
James Bottomley604a3e32005-10-29 10:28:33 -05001583 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea31012005-04-17 16:05:31 -05001584
James Smarte2a0a9d2008-12-04 22:40:02 -05001585
James Smarta58cbd52007-08-02 11:09:43 -04001586 if (pring->ringno == LPFC_ELS_RING) {
1587 lpfc_debugfs_slow_ring_trc(phba,
1588 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1589 *(((uint32_t *) &nextiocb->iocb) + 4),
1590 *(((uint32_t *) &nextiocb->iocb) + 6),
1591 *(((uint32_t *) &nextiocb->iocb) + 7));
1592 }
1593
dea31012005-04-17 16:05:31 -05001594 /*
1595 * Issue iocb command to adapter
1596 */
James Smart92d7f7b2007-06-17 19:56:38 -05001597 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea31012005-04-17 16:05:31 -05001598 wmb();
1599 pring->stats.iocb_cmd++;
1600
1601 /*
1602 * If there is no completion routine to call, we can release the
1603 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1604 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1605 */
1606 if (nextiocb->iocb_cmpl)
1607 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
James Bottomley604a3e32005-10-29 10:28:33 -05001608 else
James Smart2e0fef82007-06-17 19:56:36 -05001609 __lpfc_sli_release_iocbq(phba, nextiocb);
dea31012005-04-17 16:05:31 -05001610
1611 /*
1612 * Let the HBA know what IOCB slot will be the next one the
1613 * driver will put a command into.
1614 */
James Smart7e56aa22012-08-03 12:35:34 -04001615 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1616 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea31012005-04-17 16:05:31 -05001617}
1618
James Smarte59058c2008-08-24 21:49:00 -04001619/**
James Smart3621a712009-04-06 18:47:14 -04001620 * lpfc_sli_update_full_ring - Update the chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001621 * @phba: Pointer to HBA context object.
1622 * @pring: Pointer to driver SLI ring object.
1623 *
1624 * The caller is not required to hold any lock for calling this function.
1625 * This function updates the chip attention bits for the ring to inform firmware
1626 * that there is pending work to be done for this ring and requests an
1627 * interrupt when there is space available in the ring. This function is
1628 * called when the driver is unable to post more iocbs to the ring due
1629 * to unavailability of space in the ring.
1630 **/
dea31012005-04-17 16:05:31 -05001631static void
James Smart2e0fef82007-06-17 19:56:36 -05001632lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001633{
1634 int ringno = pring->ringno;
1635
1636 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1637
1638 wmb();
1639
1640 /*
1641 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1642 * The HBA will tell us when an IOCB entry is available.
1643 */
1644 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1645 readl(phba->CAregaddr); /* flush */
1646
1647 pring->stats.iocb_cmd_full++;
1648}
1649
James Smarte59058c2008-08-24 21:49:00 -04001650/**
James Smart3621a712009-04-06 18:47:14 -04001651 * lpfc_sli_update_ring - Update chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001652 * @phba: Pointer to HBA context object.
1653 * @pring: Pointer to driver SLI ring object.
1654 *
1655 * This function updates the chip attention register bit for the
1656 * given ring to inform HBA that there is more work to be done
1657 * in this ring. The caller is not required to hold any lock.
1658 **/
dea31012005-04-17 16:05:31 -05001659static void
James Smart2e0fef82007-06-17 19:56:36 -05001660lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001661{
1662 int ringno = pring->ringno;
1663
1664 /*
1665 * Tell the HBA that there is work to do in this ring.
1666 */
James Smart34b02dc2008-08-24 21:49:55 -04001667 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1668 wmb();
1669 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1670 readl(phba->CAregaddr); /* flush */
1671 }
dea31012005-04-17 16:05:31 -05001672}
1673
James Smarte59058c2008-08-24 21:49:00 -04001674/**
James Smart3621a712009-04-06 18:47:14 -04001675 * lpfc_sli_resume_iocb - Process iocbs in the txq
James Smarte59058c2008-08-24 21:49:00 -04001676 * @phba: Pointer to HBA context object.
1677 * @pring: Pointer to driver SLI ring object.
1678 *
1679 * This function is called with hbalock held to post pending iocbs
1680 * in the txq to the firmware. This function is called when the driver
1681 * detects space available in the ring.
1682 **/
dea31012005-04-17 16:05:31 -05001683static void
James Smart2e0fef82007-06-17 19:56:36 -05001684lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001685{
1686 IOCB_t *iocb;
1687 struct lpfc_iocbq *nextiocb;
1688
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001689 lockdep_assert_held(&phba->hbalock);
1690
dea31012005-04-17 16:05:31 -05001691 /*
1692 * Check to see if:
1693 * (a) there is anything on the txq to send
1694 * (b) link is up
1695 * (c) link attention events can be processed (fcp ring only)
1696 * (d) IOCB processing is not blocked by the outstanding mbox command.
1697 */
James Smart0e9bb8d2013-03-01 16:35:12 -05001698
1699 if (lpfc_is_link_up(phba) &&
1700 (!list_empty(&pring->txq)) &&
James Smart895427b2017-02-12 13:52:30 -08001701 (pring->ringno != LPFC_FCP_RING ||
James Smart0b727fe2007-10-27 13:37:25 -04001702 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea31012005-04-17 16:05:31 -05001703
1704 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1705 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1706 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1707
1708 if (iocb)
1709 lpfc_sli_update_ring(phba, pring);
1710 else
1711 lpfc_sli_update_full_ring(phba, pring);
1712 }
1713
1714 return;
1715}
1716
James Smarte59058c2008-08-24 21:49:00 -04001717/**
James Smart3621a712009-04-06 18:47:14 -04001718 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
James Smarte59058c2008-08-24 21:49:00 -04001719 * @phba: Pointer to HBA context object.
1720 * @hbqno: HBQ number.
1721 *
1722 * This function is called with hbalock held to get the next
1723 * available slot for the given HBQ. If there is a free slot
1724 * available for the HBQ, it will return a pointer to the next available
1725 * HBQ entry; otherwise it will return NULL.
1726 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001727static struct lpfc_hbq_entry *
James Smarted957682007-06-17 19:56:37 -05001728lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1729{
1730 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1731
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001732 lockdep_assert_held(&phba->hbalock);
1733
James Smarted957682007-06-17 19:56:37 -05001734 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1735 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1736 hbqp->next_hbqPutIdx = 0;
1737
1738 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
James Smart92d7f7b2007-06-17 19:56:38 -05001739 uint32_t raw_index = phba->hbq_get[hbqno];
James Smarted957682007-06-17 19:56:37 -05001740 uint32_t getidx = le32_to_cpu(raw_index);
1741
1742 hbqp->local_hbqGetIdx = getidx;
1743
1744 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1745 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05001746 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04001747 "1802 HBQ %d: local_hbqGetIdx "
James Smarted957682007-06-17 19:56:37 -05001748 "%u is > than hbqp->entry_count %u\n",
James Smarte8b62012007-08-02 11:10:09 -04001749 hbqno, hbqp->local_hbqGetIdx,
James Smarted957682007-06-17 19:56:37 -05001750 hbqp->entry_count);
1751
1752 phba->link_state = LPFC_HBA_ERROR;
1753 return NULL;
1754 }
1755
1756 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1757 return NULL;
1758 }
1759
James Smart51ef4c22007-08-02 11:10:31 -04001760 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1761 hbqp->hbqPutIdx;
James Smarted957682007-06-17 19:56:37 -05001762}
1763
James Smarte59058c2008-08-24 21:49:00 -04001764/**
James Smart3621a712009-04-06 18:47:14 -04001765 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
James Smarte59058c2008-08-24 21:49:00 -04001766 * @phba: Pointer to HBA context object.
1767 *
1768 * This function is called with no lock held to free all the
1769 * hbq buffers while uninitializing the SLI interface. It also
1770 * frees the HBQ buffers returned by the firmware but not yet
1771 * processed by the upper layers.
1772 **/
James Smarted957682007-06-17 19:56:37 -05001773void
1774lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1775{
James Smart92d7f7b2007-06-17 19:56:38 -05001776 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1777 struct hbq_dmabuf *hbq_buf;
James Smart3163f722008-02-08 18:50:25 -05001778 unsigned long flags;
James Smart51ef4c22007-08-02 11:10:31 -04001779 int i, hbq_count;
James Smarted957682007-06-17 19:56:37 -05001780
James Smart51ef4c22007-08-02 11:10:31 -04001781 hbq_count = lpfc_sli_hbq_count();
James Smarted957682007-06-17 19:56:37 -05001782 /* Return all memory used by all HBQs */
James Smart3163f722008-02-08 18:50:25 -05001783 spin_lock_irqsave(&phba->hbalock, flags);
James Smart51ef4c22007-08-02 11:10:31 -04001784 for (i = 0; i < hbq_count; ++i) {
1785 list_for_each_entry_safe(dmabuf, next_dmabuf,
1786 &phba->hbqs[i].hbq_buffer_list, list) {
1787 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1788 list_del(&hbq_buf->dbuf.list);
1789 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1790 }
James Smarta8adb832007-10-27 13:37:53 -04001791 phba->hbqs[i].buffer_count = 0;
James Smarted957682007-06-17 19:56:37 -05001792 }
James Smart3163f722008-02-08 18:50:25 -05001793
1794 /* Mark the HBQs not in use */
1795 phba->hbq_in_use = 0;
1796 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smarted957682007-06-17 19:56:37 -05001797}
1798
James Smarte59058c2008-08-24 21:49:00 -04001799/**
James Smart3621a712009-04-06 18:47:14 -04001800 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04001801 * @phba: Pointer to HBA context object.
1802 * @hbqno: HBQ number.
1803 * @hbq_buf: Pointer to HBQ buffer.
1804 *
1805 * This function is called with the hbalock held to post a
1806 * hbq buffer to the firmware. If the function finds an empty
1807 * slot in the HBQ, it will post the buffer. The function will return
1808 * zero if it successfully posts the buffer; otherwise it will
1809 * return an error.
1810 **/
James Smart3772a992009-05-22 14:50:54 -04001811static int
James Smarted957682007-06-17 19:56:37 -05001812lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
James Smart92d7f7b2007-06-17 19:56:38 -05001813 struct hbq_dmabuf *hbq_buf)
James Smarted957682007-06-17 19:56:37 -05001814{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001815 lockdep_assert_held(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04001816 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1817}
1818
1819/**
1820 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1821 * @phba: Pointer to HBA context object.
1822 * @hbqno: HBQ number.
1823 * @hbq_buf: Pointer to HBQ buffer.
1824 *
1825 * This function is called with the hbalock held to post a hbq buffer to the
1826 * firmware. If the function finds an empty slot in the HBQ, it will post the
1827 * buffer and place it on the hbq_buffer_list. The function will return zero if
1828 * it successfully posts the buffer; otherwise it will return an error.
1829 **/
1830static int
1831lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1832 struct hbq_dmabuf *hbq_buf)
1833{
James Smarted957682007-06-17 19:56:37 -05001834 struct lpfc_hbq_entry *hbqe;
James Smart92d7f7b2007-06-17 19:56:38 -05001835 dma_addr_t physaddr = hbq_buf->dbuf.phys;
James Smarted957682007-06-17 19:56:37 -05001836
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001837 lockdep_assert_held(&phba->hbalock);
James Smarted957682007-06-17 19:56:37 -05001838 /* Get next HBQ entry slot to use */
1839 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1840 if (hbqe) {
1841 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1842
James Smart92d7f7b2007-06-17 19:56:38 -05001843 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1844 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
James Smart895427b2017-02-12 13:52:30 -08001845 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
James Smarted957682007-06-17 19:56:37 -05001846 hbqe->bde.tus.f.bdeFlags = 0;
James Smart92d7f7b2007-06-17 19:56:38 -05001847 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1848 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1849 /* Sync SLIM */
James Smarted957682007-06-17 19:56:37 -05001850 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1851 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
James Smart92d7f7b2007-06-17 19:56:38 -05001852 /* flush */
James Smarted957682007-06-17 19:56:37 -05001853 readl(phba->hbq_put + hbqno);
James Smart51ef4c22007-08-02 11:10:31 -04001854 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
James Smart3772a992009-05-22 14:50:54 -04001855 return 0;
1856 } else
1857 return -ENOMEM;
James Smarted957682007-06-17 19:56:37 -05001858}
1859
James Smart4f774512009-05-22 14:52:35 -04001860/**
1861 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1862 * @phba: Pointer to HBA context object.
1863 * @hbqno: HBQ number.
1864 * @hbq_buf: Pointer to HBQ buffer.
1865 *
1866 * This function is called with the hbalock held to post an RQE to the SLI4
1867 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1868 * the hbq_buffer_list and return zero; otherwise it will return an error.
1869 **/
1870static int
1871lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1872 struct hbq_dmabuf *hbq_buf)
1873{
1874 int rc;
1875 struct lpfc_rqe hrqe;
1876 struct lpfc_rqe drqe;
James Smart895427b2017-02-12 13:52:30 -08001877 struct lpfc_queue *hrq;
1878 struct lpfc_queue *drq;
1879
1880 if (hbqno != LPFC_ELS_HBQ)
1881 return 1;
1882 hrq = phba->sli4_hba.hdr_rq;
1883 drq = phba->sli4_hba.dat_rq;
James Smart4f774512009-05-22 14:52:35 -04001884
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001885 lockdep_assert_held(&phba->hbalock);
James Smart4f774512009-05-22 14:52:35 -04001886 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1887 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1888 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1889 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
James Smart895427b2017-02-12 13:52:30 -08001890 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
James Smart4f774512009-05-22 14:52:35 -04001891 if (rc < 0)
1892 return rc;
James Smart895427b2017-02-12 13:52:30 -08001893 hbq_buf->tag = (rc | (hbqno << 16));
James Smart4f774512009-05-22 14:52:35 -04001894 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1895 return 0;
1896}
1897
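/*
 * Editor's note on the tag layout used above: for SLI4 the RQE index
 * returned by lpfc_sli4_rq_put() is kept in the low 16 bits of
 * hbq_buf->tag and the HBQ number in the high 16 bits, mirroring the
 * SLI3 encoding (buffer_count | (hbqno << 16)) used in
 * lpfc_sli_hbqbuf_fill_hbqs() below.  lpfc_sli_hbqbuf_find() recovers
 * the queue number with:
 *
 *	hbqno = hbq_buf->tag >> 16;
 */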
James Smarte59058c2008-08-24 21:49:00 -04001898/* HBQ for ELS and CT traffic. */
James Smart92d7f7b2007-06-17 19:56:38 -05001899static struct lpfc_hbq_init lpfc_els_hbq = {
1900 .rn = 1,
James Smartdef9c7a2009-12-21 17:02:28 -05001901 .entry_count = 256,
James Smart92d7f7b2007-06-17 19:56:38 -05001902 .mask_count = 0,
1903 .profile = 0,
James Smart51ef4c22007-08-02 11:10:31 -04001904 .ring_mask = (1 << LPFC_ELS_RING),
James Smart92d7f7b2007-06-17 19:56:38 -05001905 .buffer_count = 0,
James Smarta257bf92009-04-06 18:48:10 -04001906 .init_count = 40,
1907 .add_count = 40,
James Smart92d7f7b2007-06-17 19:56:38 -05001908};
James Smarted957682007-06-17 19:56:37 -05001909
James Smarte59058c2008-08-24 21:49:00 -04001910/* Array of HBQs */
James Smart78b2d852007-08-02 11:10:21 -04001911struct lpfc_hbq_init *lpfc_hbq_defs[] = {
James Smart92d7f7b2007-06-17 19:56:38 -05001912 &lpfc_els_hbq,
1913};
1914
James Smarte59058c2008-08-24 21:49:00 -04001915/**
James Smart3621a712009-04-06 18:47:14 -04001916 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
James Smarte59058c2008-08-24 21:49:00 -04001917 * @phba: Pointer to HBA context object.
1918 * @hbqno: HBQ number.
1919 * @count: Number of HBQ buffers to be posted.
1920 *
James Smartd7c255b2008-08-24 21:50:00 -04001921 * This function is called with no lock held to post more hbq buffers to the
1922 * given HBQ. The function returns the number of HBQ buffers successfully
1923 * posted.
James Smarte59058c2008-08-24 21:49:00 -04001924 **/
James Smart311464e2007-08-02 11:10:37 -04001925static int
James Smart92d7f7b2007-06-17 19:56:38 -05001926lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1927{
James Smartd7c255b2008-08-24 21:50:00 -04001928 uint32_t i, posted = 0;
James Smart3163f722008-02-08 18:50:25 -05001929 unsigned long flags;
James Smart92d7f7b2007-06-17 19:56:38 -05001930 struct hbq_dmabuf *hbq_buffer;
James Smartd7c255b2008-08-24 21:50:00 -04001931 LIST_HEAD(hbq_buf_list);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07001932 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
James Smart51ef4c22007-08-02 11:10:31 -04001933 return 0;
James Smart51ef4c22007-08-02 11:10:31 -04001934
James Smartd7c255b2008-08-24 21:50:00 -04001935 if ((phba->hbqs[hbqno].buffer_count + count) >
1936 lpfc_hbq_defs[hbqno]->entry_count)
1937 count = lpfc_hbq_defs[hbqno]->entry_count -
1938 phba->hbqs[hbqno].buffer_count;
1939 if (!count)
1940 return 0;
1941 /* Allocate HBQ entries */
1942 for (i = 0; i < count; i++) {
1943 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1944 if (!hbq_buffer)
1945 break;
1946 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1947 }
James Smart3163f722008-02-08 18:50:25 -05001948 /* Check whether HBQ is still in use */
1949 spin_lock_irqsave(&phba->hbalock, flags);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07001950 if (!phba->hbq_in_use)
James Smartd7c255b2008-08-24 21:50:00 -04001951 goto err;
1952 while (!list_empty(&hbq_buf_list)) {
1953 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1954 dbuf.list);
1955 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1956 (hbqno << 16));
James Smart3772a992009-05-22 14:50:54 -04001957 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
James Smarta8adb832007-10-27 13:37:53 -04001958 phba->hbqs[hbqno].buffer_count++;
James Smartd7c255b2008-08-24 21:50:00 -04001959 posted++;
1960 } else
James Smart51ef4c22007-08-02 11:10:31 -04001961 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smart92d7f7b2007-06-17 19:56:38 -05001962 }
James Smart3163f722008-02-08 18:50:25 -05001963 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smartd7c255b2008-08-24 21:50:00 -04001964 return posted;
1965err:
1966 spin_unlock_irqrestore(&phba->hbalock, flags);
1967 while (!list_empty(&hbq_buf_list)) {
1968 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1969 dbuf.list);
1970 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1971 }
James Smart92d7f7b2007-06-17 19:56:38 -05001972 return 0;
James Smarted957682007-06-17 19:56:37 -05001973}
1974
James Smarte59058c2008-08-24 21:49:00 -04001975/**
James Smart3621a712009-04-06 18:47:14 -04001976 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
James Smarte59058c2008-08-24 21:49:00 -04001977 * @phba: Pointer to HBA context object.
1978 * @qno: HBQ number.
1979 *
1980 * This function posts more buffers to the HBQ. This function
James Smartd7c255b2008-08-24 21:50:00 -04001981 * is called with no lock held. The function returns the number of HBQ entries
1982 * successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04001983 **/
James Smarted957682007-06-17 19:56:37 -05001984int
James Smart92d7f7b2007-06-17 19:56:38 -05001985lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05001986{
James Smartdef9c7a2009-12-21 17:02:28 -05001987 if (phba->sli_rev == LPFC_SLI_REV4)
1988 return 0;
1989 else
1990 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1991 lpfc_hbq_defs[qno]->add_count);
James Smarted957682007-06-17 19:56:37 -05001992}
1993
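/*
 * Editor's usage sketch (not driver code): a consumer can top an HBQ back
 * up after using buffers; the return value is only the count of buffers
 * actually posted, so zero is not treated as an error (the queue may
 * already be full, and SLI4 ports always return zero here because their
 * RQEs are replenished elsewhere).
 *
 *	lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 */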
James Smarte59058c2008-08-24 21:49:00 -04001994/**
James Smart3621a712009-04-06 18:47:14 -04001995 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
James Smarte59058c2008-08-24 21:49:00 -04001996 * @phba: Pointer to HBA context object.
1997 * @qno: HBQ queue number.
1998 *
1999 * This function is called from SLI initialization code path with
2000 * no lock held to post initial HBQ buffers to firmware. The
James Smartd7c255b2008-08-24 21:50:00 -04002001 * function returns the number of HBQ entries successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04002002 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002003static int
James Smart92d7f7b2007-06-17 19:56:38 -05002004lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05002005{
James Smartdef9c7a2009-12-21 17:02:28 -05002006 if (phba->sli_rev == LPFC_SLI_REV4)
2007 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
James Smart73d91e52011-10-10 21:32:10 -04002008 lpfc_hbq_defs[qno]->entry_count);
James Smartdef9c7a2009-12-21 17:02:28 -05002009 else
2010 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2011 lpfc_hbq_defs[qno]->init_count);
James Smarted957682007-06-17 19:56:37 -05002012}
2013
James Smarte59058c2008-08-24 21:49:00 -04002014/**
James Smart3772a992009-05-22 14:50:54 -04002015 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2016 * @rb_list: Pointer to the driver buffer list (for example, an HBQ's
2017 * hbq_buffer_list) from which the first buffer is removed.
2018 *
2019 * This function removes the first hbq buffer on an hbq list and returns a
2020 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2021 **/
2022static struct hbq_dmabuf *
2023lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2024{
2025 struct lpfc_dmabuf *d_buf;
2026
2027 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2028 if (!d_buf)
2029 return NULL;
2030 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2031}
2032
2033/**
James Smart2d7dbc42017-02-12 13:52:35 -08002034 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2035 * @phba: Pointer to HBA context object.
2036 * @hrq: Pointer to the header receive queue to take the buffer from.
2037 *
2038 * This function removes the first RQ buffer on an RQ buffer list and returns a
2039 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2040 **/
2041static struct rqb_dmabuf *
2042lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2043{
2044 struct lpfc_dmabuf *h_buf;
2045 struct lpfc_rqb *rqbp;
2046
2047 rqbp = hrq->rqbp;
2048 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2049 struct lpfc_dmabuf, list);
2050 if (!h_buf)
2051 return NULL;
2052 rqbp->buffer_count--;
2053 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2054}
2055
2056/**
James Smart3621a712009-04-06 18:47:14 -04002057 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
James Smarte59058c2008-08-24 21:49:00 -04002058 * @phba: Pointer to HBA context object.
2059 * @tag: Tag of the hbq buffer.
2060 *
Sebastian Herbszt71892412016-04-17 13:27:27 +02002061 * This function searches for the hbq buffer associated with the given tag in
2062 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
2063 * otherwise it returns NULL.
James Smarte59058c2008-08-24 21:49:00 -04002064 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002065static struct hbq_dmabuf *
James Smarted957682007-06-17 19:56:37 -05002066lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2067{
James Smart92d7f7b2007-06-17 19:56:38 -05002068 struct lpfc_dmabuf *d_buf;
2069 struct hbq_dmabuf *hbq_buf;
James Smart51ef4c22007-08-02 11:10:31 -04002070 uint32_t hbqno;
James Smarted957682007-06-17 19:56:37 -05002071
James Smart51ef4c22007-08-02 11:10:31 -04002072 hbqno = tag >> 16;
Jesper Juhla0a74e452007-08-09 20:47:15 +02002073 if (hbqno >= LPFC_MAX_HBQS)
James Smart51ef4c22007-08-02 11:10:31 -04002074 return NULL;
2075
James Smart3772a992009-05-22 14:50:54 -04002076 spin_lock_irq(&phba->hbalock);
James Smart51ef4c22007-08-02 11:10:31 -04002077 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
James Smart92d7f7b2007-06-17 19:56:38 -05002078 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
James Smart51ef4c22007-08-02 11:10:31 -04002079 if (hbq_buf->tag == tag) {
James Smart3772a992009-05-22 14:50:54 -04002080 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05002081 return hbq_buf;
James Smarted957682007-06-17 19:56:37 -05002082 }
2083 }
James Smart3772a992009-05-22 14:50:54 -04002084 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05002085 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04002086 "1803 Bad hbq tag. Data: x%x x%x\n",
James Smarta8adb832007-10-27 13:37:53 -04002087 tag, phba->hbqs[tag >> 16].buffer_count);
James Smart92d7f7b2007-06-17 19:56:38 -05002088 return NULL;
James Smarted957682007-06-17 19:56:37 -05002089}
2090
James Smarte59058c2008-08-24 21:49:00 -04002091/**
James Smart3621a712009-04-06 18:47:14 -04002092 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04002093 * @phba: Pointer to HBA context object.
2094 * @hbq_buffer: Pointer to HBQ buffer.
2095 *
2096 * This function is called with hbalock held. This function gives back
2097 * the hbq buffer to the firmware. If the HBQ does not have space to
2098 * post the buffer, it will free the buffer.
2099 **/
James Smarted957682007-06-17 19:56:37 -05002100void
James Smart51ef4c22007-08-02 11:10:31 -04002101lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
James Smarted957682007-06-17 19:56:37 -05002102{
2103 uint32_t hbqno;
2104
James Smart51ef4c22007-08-02 11:10:31 -04002105 if (hbq_buffer) {
2106 hbqno = hbq_buffer->tag >> 16;
James Smart3772a992009-05-22 14:50:54 -04002107 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
James Smart51ef4c22007-08-02 11:10:31 -04002108 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smarted957682007-06-17 19:56:37 -05002109 }
2110}
2111
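/*
 * Editor's usage sketch (not driver code): an unsolicited completion
 * carries the buffer tag; the buffer is looked up, its payload consumed,
 * and then handed back to the firmware with hbalock held, as required by
 * lpfc_sli_free_hbq() above.
 *
 *	hbq_buf = lpfc_sli_hbqbuf_find(phba, tag);
 *	if (hbq_buf) {
 *		(consume the payload described by hbq_buf->dbuf)
 *		spin_lock_irqsave(&phba->hbalock, flags);
 *		lpfc_sli_free_hbq(phba, hbq_buf);
 *		spin_unlock_irqrestore(&phba->hbalock, flags);
 *	}
 */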
James Smarte59058c2008-08-24 21:49:00 -04002112/**
James Smart3621a712009-04-06 18:47:14 -04002113 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
James Smarte59058c2008-08-24 21:49:00 -04002114 * @mbxCommand: mailbox command code.
2115 *
2116 * This function is called by the mailbox event handler function to verify
2117 * that the completed mailbox command is a legitimate mailbox command. If the
2118 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2119 * and the mailbox event handler will take the HBA offline.
2120 **/
dea31012005-04-17 16:05:31 -05002121static int
2122lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2123{
2124 uint8_t ret;
2125
2126 switch (mbxCommand) {
2127 case MBX_LOAD_SM:
2128 case MBX_READ_NV:
2129 case MBX_WRITE_NV:
James Smarta8adb832007-10-27 13:37:53 -04002130 case MBX_WRITE_VPARMS:
dea31012005-04-17 16:05:31 -05002131 case MBX_RUN_BIU_DIAG:
2132 case MBX_INIT_LINK:
2133 case MBX_DOWN_LINK:
2134 case MBX_CONFIG_LINK:
2135 case MBX_CONFIG_RING:
2136 case MBX_RESET_RING:
2137 case MBX_READ_CONFIG:
2138 case MBX_READ_RCONFIG:
2139 case MBX_READ_SPARM:
2140 case MBX_READ_STATUS:
2141 case MBX_READ_RPI:
2142 case MBX_READ_XRI:
2143 case MBX_READ_REV:
2144 case MBX_READ_LNK_STAT:
2145 case MBX_REG_LOGIN:
2146 case MBX_UNREG_LOGIN:
dea31012005-04-17 16:05:31 -05002147 case MBX_CLEAR_LA:
2148 case MBX_DUMP_MEMORY:
2149 case MBX_DUMP_CONTEXT:
2150 case MBX_RUN_DIAGS:
2151 case MBX_RESTART:
2152 case MBX_UPDATE_CFG:
2153 case MBX_DOWN_LOAD:
2154 case MBX_DEL_LD_ENTRY:
2155 case MBX_RUN_PROGRAM:
2156 case MBX_SET_MASK:
James Smart09372822008-01-11 01:52:54 -05002157 case MBX_SET_VARIABLE:
dea31012005-04-17 16:05:31 -05002158 case MBX_UNREG_D_ID:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002159 case MBX_KILL_BOARD:
dea31012005-04-17 16:05:31 -05002160 case MBX_CONFIG_FARP:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002161 case MBX_BEACON:
dea31012005-04-17 16:05:31 -05002162 case MBX_LOAD_AREA:
2163 case MBX_RUN_BIU_DIAG64:
2164 case MBX_CONFIG_PORT:
2165 case MBX_READ_SPARM64:
2166 case MBX_READ_RPI64:
2167 case MBX_REG_LOGIN64:
James Smart76a95d72010-11-20 23:11:48 -05002168 case MBX_READ_TOPOLOGY:
James Smart09372822008-01-11 01:52:54 -05002169 case MBX_WRITE_WWN:
dea31012005-04-17 16:05:31 -05002170 case MBX_SET_DEBUG:
2171 case MBX_LOAD_EXP_ROM:
James Smart57127f12007-10-27 13:37:05 -04002172 case MBX_ASYNCEVT_ENABLE:
James Smart92d7f7b2007-06-17 19:56:38 -05002173 case MBX_REG_VPI:
2174 case MBX_UNREG_VPI:
James Smart858c9f62007-06-17 19:56:39 -05002175 case MBX_HEARTBEAT:
James Smart84774a42008-08-24 21:50:06 -04002176 case MBX_PORT_CAPABILITIES:
2177 case MBX_PORT_IOV_CONTROL:
James Smart04c68492009-05-22 14:52:52 -04002178 case MBX_SLI4_CONFIG:
2179 case MBX_SLI4_REQ_FTRS:
2180 case MBX_REG_FCFI:
2181 case MBX_UNREG_FCFI:
2182 case MBX_REG_VFI:
2183 case MBX_UNREG_VFI:
2184 case MBX_INIT_VPI:
2185 case MBX_INIT_VFI:
2186 case MBX_RESUME_RPI:
James Smartc7495932010-04-06 15:05:28 -04002187 case MBX_READ_EVENT_LOG_STATUS:
2188 case MBX_READ_EVENT_LOG:
James Smartdcf2a4e2010-09-29 11:18:53 -04002189 case MBX_SECURITY_MGMT:
2190 case MBX_AUTH_PORT:
James Smart940eb682012-08-03 12:37:08 -04002191 case MBX_ACCESS_VDATA:
dea31012005-04-17 16:05:31 -05002192 ret = mbxCommand;
2193 break;
2194 default:
2195 ret = MBX_SHUTDOWN;
2196 break;
2197 }
James Smart2e0fef82007-06-17 19:56:36 -05002198 return ret;
dea31012005-04-17 16:05:31 -05002199}
James Smarte59058c2008-08-24 21:49:00 -04002200
2201/**
James Smart3621a712009-04-06 18:47:14 -04002202 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002203 * @phba: Pointer to HBA context object.
2204 * @pmboxq: Pointer to mailbox command.
2205 *
2206 * This is completion handler function for mailbox commands issued from
2207 * lpfc_sli_issue_mbox_wait function. This function is called by the
2208 * mailbox event handler function with no lock held. This function
2209 * will wake up thread waiting on the wait queue pointed by context1
2210 * of the mailbox.
2211 **/
James Smart04c68492009-05-22 14:52:52 -04002212void
James Smart2e0fef82007-06-17 19:56:36 -05002213lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea31012005-04-17 16:05:31 -05002214{
2215 wait_queue_head_t *pdone_q;
James Smart858c9f62007-06-17 19:56:39 -05002216 unsigned long drvr_flag;
dea31012005-04-17 16:05:31 -05002217
2218 /*
2219 * If pdone_q is empty, the driver thread gave up waiting and
2220 * continued running.
2221 */
James Smart7054a602007-04-25 09:52:34 -04002222 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
James Smart858c9f62007-06-17 19:56:39 -05002223 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05002224 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2225 if (pdone_q)
2226 wake_up_interruptible(pdone_q);
James Smart858c9f62007-06-17 19:56:39 -05002227 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05002228 return;
2229}
2230
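/*
 * Editor's usage sketch (not driver code): the synchronous mailbox path
 * pairs with this handler roughly as follows; timeout handling and the
 * exact flag checks are simplified (see lpfc_sli_issue_mbox_wait later in
 * this file for the real implementation).
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context1 = &done_q;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_event_interruptible_timeout(done_q,
 *				pmboxq->mbox_flag & LPFC_MBX_WAKE,
 *				msecs_to_jiffies(timeout * 1000));
 */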
James Smarte59058c2008-08-24 21:49:00 -04002231
2232/**
James Smart3621a712009-04-06 18:47:14 -04002233 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002234 * @phba: Pointer to HBA context object.
2235 * @pmb: Pointer to mailbox object.
2236 *
2237 * This function is the default mailbox completion handler. It
2238 * frees the memory resources associated with the completed mailbox
2239 * command. If the completed command is a REG_LOGIN mailbox command,
2240 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2241 **/
dea31012005-04-17 16:05:31 -05002242void
James Smart2e0fef82007-06-17 19:56:36 -05002243lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea31012005-04-17 16:05:31 -05002244{
James Smartd439d282010-09-29 11:18:45 -04002245 struct lpfc_vport *vport = pmb->vport;
dea31012005-04-17 16:05:31 -05002246 struct lpfc_dmabuf *mp;
James Smartd439d282010-09-29 11:18:45 -04002247 struct lpfc_nodelist *ndlp;
James Smart5af5eee2010-10-22 11:06:38 -04002248 struct Scsi_Host *shost;
James Smart04c68492009-05-22 14:52:52 -04002249 uint16_t rpi, vpi;
James Smart7054a602007-04-25 09:52:34 -04002250 int rc;
2251
dea31012005-04-17 16:05:31 -05002252 mp = (struct lpfc_dmabuf *) (pmb->context1);
James Smart7054a602007-04-25 09:52:34 -04002253
dea31012005-04-17 16:05:31 -05002254 if (mp) {
2255 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2256 kfree(mp);
2257 }
James Smart7054a602007-04-25 09:52:34 -04002258
2259 /*
2260 * If a REG_LOGIN succeeded after the node was destroyed or the node
2261 * is in re-discovery, the driver needs to clean up the RPI.
2262 */
James Smart2e0fef82007-06-17 19:56:36 -05002263 if (!(phba->pport->load_flag & FC_UNLOADING) &&
James Smart04c68492009-05-22 14:52:52 -04002264 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2265 !pmb->u.mb.mbxStatus) {
2266 rpi = pmb->u.mb.un.varWords[0];
James Smart6d368e52011-05-24 11:44:12 -04002267 vpi = pmb->u.mb.un.varRegLogin.vpi;
James Smart04c68492009-05-22 14:52:52 -04002268 lpfc_unreg_login(phba, vpi, rpi, pmb);
James Smartde96e9c2016-03-31 14:12:27 -07002269 pmb->vport = vport;
James Smart92d7f7b2007-06-17 19:56:38 -05002270 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart7054a602007-04-25 09:52:34 -04002271 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2272 if (rc != MBX_NOT_FINISHED)
2273 return;
2274 }
2275
James Smart695a8142010-01-26 23:08:03 -05002276 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2277 !(phba->pport->load_flag & FC_UNLOADING) &&
2278 !pmb->u.mb.mbxStatus) {
James Smart5af5eee2010-10-22 11:06:38 -04002279 shost = lpfc_shost_from_vport(vport);
2280 spin_lock_irq(shost->host_lock);
2281 vport->vpi_state |= LPFC_VPI_REGISTERED;
2282 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2283 spin_unlock_irq(shost->host_lock);
James Smart695a8142010-01-26 23:08:03 -05002284 }
2285
James Smartd439d282010-09-29 11:18:45 -04002286 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2287 ndlp = (struct lpfc_nodelist *)pmb->context2;
2288 lpfc_nlp_put(ndlp);
2289 pmb->context2 = NULL;
2290 }
2291
James Smartdcf2a4e2010-09-29 11:18:53 -04002292 /* Check security permission status on INIT_LINK mailbox command */
2293 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2294 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2295 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2296 "2860 SLI authentication is required "
2297 "for INIT_LINK but has not done yet\n");
2298
James Smart04c68492009-05-22 14:52:52 -04002299 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2300 lpfc_sli4_mbox_cmd_free(phba, pmb);
2301 else
2302 mempool_free(pmb, phba->mbox_mem_pool);
dea31012005-04-17 16:05:31 -05002303}
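/*
 * Editor's usage sketch (not driver code): fire-and-forget mailbox
 * commands point mbox_cmpl at the default handler so the mailbox memory
 * (and any DMA buffer hung off context1) is reclaimed on completion, as
 * the UNREG_LOGIN retry issued above already demonstrates.
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_unreg_login(phba, vpi, rpi, mbox);
 *	mbox->vport = vport;
 *	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */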
James Smartbe6bb942015-04-07 15:07:22 -04002304 /**
2305 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2306 * @phba: Pointer to HBA context object.
2307 * @pmb: Pointer to mailbox object.
2308 *
2309 * This function is the unreg rpi mailbox completion handler. It
2310 * frees the memory resources associated with the completed mailbox
2311 * command. An additional reference is put on the ndlp to prevent
2312 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2313 * the unreg mailbox command completes; this routine puts the
2314 * reference back.
2315 *
2316 **/
2317void
2318lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2319{
2320 struct lpfc_vport *vport = pmb->vport;
2321 struct lpfc_nodelist *ndlp;
2322
2323 ndlp = pmb->context1;
2324 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2325 if (phba->sli_rev == LPFC_SLI_REV4 &&
2326 (bf_get(lpfc_sli_intf_if_type,
2327 &phba->sli4_hba.sli_intf) ==
2328 LPFC_SLI_INTF_IF_TYPE_2)) {
2329 if (ndlp) {
2330 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2331 "0010 UNREG_LOGIN vpi:%x "
2332 "rpi:%x DID:%x map:%x %p\n",
2333 vport->vpi, ndlp->nlp_rpi,
2334 ndlp->nlp_DID,
2335 ndlp->nlp_usg_map, ndlp);
James Smart7c5e5182015-05-22 10:42:43 -04002336 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
James Smartbe6bb942015-04-07 15:07:22 -04002337 lpfc_nlp_put(ndlp);
2338 }
2339 }
2340 }
2341
2342 mempool_free(pmb, phba->mbox_mem_pool);
2343}
dea31012005-04-17 16:05:31 -05002344
James Smarte59058c2008-08-24 21:49:00 -04002345/**
James Smart3621a712009-04-06 18:47:14 -04002346 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
James Smarte59058c2008-08-24 21:49:00 -04002347 * @phba: Pointer to HBA context object.
2348 *
2349 * This function is called with no lock held. This function processes all
2350 * the completed mailbox commands and gives it to upper layers. The interrupt
2351 * service routine processes the mailbox completion interrupt and adds completed
2352 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2353 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2354 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2355 * function returns the mailbox commands to the upper layer by calling the
2356 * completion handler function of each mailbox.
2357 **/
dea31012005-04-17 16:05:31 -05002358int
James Smart2e0fef82007-06-17 19:56:36 -05002359lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05002360{
James Smart92d7f7b2007-06-17 19:56:38 -05002361 MAILBOX_t *pmbox;
dea31012005-04-17 16:05:31 -05002362 LPFC_MBOXQ_t *pmb;
James Smart92d7f7b2007-06-17 19:56:38 -05002363 int rc;
2364 LIST_HEAD(cmplq);
dea31012005-04-17 16:05:31 -05002365
2366 phba->sli.slistat.mbox_event++;
2367
James Smart92d7f7b2007-06-17 19:56:38 -05002368	/* Get all completed mailbox buffers into the cmplq */
2369 spin_lock_irq(&phba->hbalock);
2370 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2371 spin_unlock_irq(&phba->hbalock);
2372
dea31012005-04-17 16:05:31 -05002373 /* Get a Mailbox buffer to setup mailbox commands for callback */
James Smart92d7f7b2007-06-17 19:56:38 -05002374 do {
2375 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2376 if (pmb == NULL)
2377 break;
2378
James Smart04c68492009-05-22 14:52:52 -04002379 pmbox = &pmb->u.mb;
dea31012005-04-17 16:05:31 -05002380
James Smart858c9f62007-06-17 19:56:39 -05002381 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2382 if (pmb->vport) {
2383 lpfc_debugfs_disc_trc(pmb->vport,
2384 LPFC_DISC_TRC_MBOX_VPORT,
2385 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2386 (uint32_t)pmbox->mbxCommand,
2387 pmbox->un.varWords[0],
2388 pmbox->un.varWords[1]);
2389 }
2390 else {
2391 lpfc_debugfs_disc_trc(phba->pport,
2392 LPFC_DISC_TRC_MBOX,
2393 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2394 (uint32_t)pmbox->mbxCommand,
2395 pmbox->un.varWords[0],
2396 pmbox->un.varWords[1]);
2397 }
2398 }
2399
dea31012005-04-17 16:05:31 -05002400 /*
2401 * It is a fatal error if unknown mbox command completion.
2402 */
2403 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2404 MBX_SHUTDOWN) {
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002405 /* Unknown mailbox command compl */
James Smart92d7f7b2007-06-17 19:56:38 -05002406 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002407 "(%d):0323 Unknown Mailbox command "
James Smarta183a152011-10-10 21:32:43 -04002408 "x%x (x%x/x%x) Cmpl\n",
James Smart92d7f7b2007-06-17 19:56:38 -05002409 pmb->vport ? pmb->vport->vpi : 0,
James Smart04c68492009-05-22 14:52:52 -04002410 pmbox->mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04002411 lpfc_sli_config_mbox_subsys_get(phba,
2412 pmb),
2413 lpfc_sli_config_mbox_opcode_get(phba,
2414 pmb));
James Smart2e0fef82007-06-17 19:56:36 -05002415 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05002416 phba->work_hs = HS_FFER3;
2417 lpfc_handle_eratt(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05002418 continue;
dea31012005-04-17 16:05:31 -05002419 }
2420
dea31012005-04-17 16:05:31 -05002421 if (pmbox->mbxStatus) {
2422 phba->sli.slistat.mbox_stat_err++;
2423 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2424 /* Mbox cmd cmpl error - RETRYing */
James Smart92d7f7b2007-06-17 19:56:38 -05002425 lpfc_printf_log(phba, KERN_INFO,
James Smarta183a152011-10-10 21:32:43 -04002426 LOG_MBOX | LOG_SLI,
2427 "(%d):0305 Mbox cmd cmpl "
2428 "error - RETRYing Data: x%x "
2429 "(x%x/x%x) x%x x%x x%x\n",
2430 pmb->vport ? pmb->vport->vpi : 0,
2431 pmbox->mbxCommand,
2432 lpfc_sli_config_mbox_subsys_get(phba,
2433 pmb),
2434 lpfc_sli_config_mbox_opcode_get(phba,
2435 pmb),
2436 pmbox->mbxStatus,
2437 pmbox->un.varWords[0],
2438 pmb->vport->port_state);
dea31012005-04-17 16:05:31 -05002439 pmbox->mbxStatus = 0;
2440 pmbox->mbxOwner = OWN_HOST;
dea31012005-04-17 16:05:31 -05002441 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
James Smart04c68492009-05-22 14:52:52 -04002442 if (rc != MBX_NOT_FINISHED)
James Smart92d7f7b2007-06-17 19:56:38 -05002443 continue;
dea31012005-04-17 16:05:31 -05002444 }
2445 }
2446
2447 /* Mailbox cmd <cmd> Cmpl <cmpl> */
James Smart92d7f7b2007-06-17 19:56:38 -05002448 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04002449 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
James Smarte74c03c2013-04-17 20:15:19 -04002450 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2451 "x%x x%x x%x\n",
James Smart92d7f7b2007-06-17 19:56:38 -05002452 pmb->vport ? pmb->vport->vpi : 0,
dea31012005-04-17 16:05:31 -05002453 pmbox->mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04002454 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2455 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea31012005-04-17 16:05:31 -05002456 pmb->mbox_cmpl,
2457 *((uint32_t *) pmbox),
2458 pmbox->un.varWords[0],
2459 pmbox->un.varWords[1],
2460 pmbox->un.varWords[2],
2461 pmbox->un.varWords[3],
2462 pmbox->un.varWords[4],
2463 pmbox->un.varWords[5],
2464 pmbox->un.varWords[6],
James Smarte74c03c2013-04-17 20:15:19 -04002465 pmbox->un.varWords[7],
2466 pmbox->un.varWords[8],
2467 pmbox->un.varWords[9],
2468 pmbox->un.varWords[10]);
dea31012005-04-17 16:05:31 -05002469
James Smart92d7f7b2007-06-17 19:56:38 -05002470 if (pmb->mbox_cmpl)
dea31012005-04-17 16:05:31 -05002471 pmb->mbox_cmpl(phba,pmb);
James Smart92d7f7b2007-06-17 19:56:38 -05002472 } while (1);
James Smart2e0fef82007-06-17 19:56:36 -05002473 return 0;
dea31012005-04-17 16:05:31 -05002474}
James Smart92d7f7b2007-06-17 19:56:38 -05002475
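/*
 * Illustrative sketch only, not part of the driver: the general shape of a
 * mailbox completion handler that lpfc_sli_handle_mb_event() above invokes
 * through pmb->mbox_cmpl. The name lpfc_example_mbox_cmpl() is hypothetical;
 * real handlers (e.g. lpfc_sli_def_mbox_cmpl) also release any buffers that
 * were attached to the mailbox.
 */
static void __maybe_unused
lpfc_example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;

	if (mb->mbxStatus)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
				"(%d):example mbox x%x failed, status x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				mb->mbxCommand, mb->mbxStatus);

	/* Once the completion handler runs, it owns the mailbox memory. */
	mempool_free(pmb, phba->mbox_mem_pool);
}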
James Smarte59058c2008-08-24 21:49:00 -04002476/**
James Smart3621a712009-04-06 18:47:14 -04002477 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
James Smarte59058c2008-08-24 21:49:00 -04002478 * @phba: Pointer to HBA context object.
2479 * @pring: Pointer to driver SLI ring object.
2480 * @tag: buffer tag.
2481 *
 2482 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 2483 * is set in the tag, the buffer was posted for a particular exchange and
 2484 * the function returns the buffer without replacing it.
 2485 * If the buffer is for unsolicited ELS or CT traffic, this function
 2486 * returns the buffer and also posts another buffer to the firmware.
2487 **/
James Smart76bb24e2007-10-27 13:38:00 -04002488static struct lpfc_dmabuf *
2489lpfc_sli_get_buff(struct lpfc_hba *phba,
James Smart9f1e1b52008-12-04 22:39:40 -05002490 struct lpfc_sli_ring *pring,
2491 uint32_t tag)
James Smart76bb24e2007-10-27 13:38:00 -04002492{
James Smart9f1e1b52008-12-04 22:39:40 -05002493 struct hbq_dmabuf *hbq_entry;
2494
James Smart76bb24e2007-10-27 13:38:00 -04002495 if (tag & QUE_BUFTAG_BIT)
2496 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
James Smart9f1e1b52008-12-04 22:39:40 -05002497 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2498 if (!hbq_entry)
2499 return NULL;
2500 return &hbq_entry->dbuf;
James Smart76bb24e2007-10-27 13:38:00 -04002501}
James Smart57127f12007-10-27 13:37:05 -04002502
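/*
 * Usage sketch, not driver code: retrieving and releasing the DMA buffer the
 * firmware posted for an unsolicited entry, using the tag carried in the
 * response iocb. The helper name lpfc_example_release_unsol_buf() is
 * hypothetical; the pattern mirrors the CMD_IOCB_RET_XRI64_CX handling in
 * lpfc_sli_process_unsol_iocb() below.
 */
static void __maybe_unused
lpfc_example_release_unsol_buf(struct lpfc_hba *phba,
			       struct lpfc_sli_ring *pring, IOCB_t *irsp)
{
	struct lpfc_dmabuf *dbuf;

	if (!irsp->ulpBdeCount)
		return;

	/* Look up the buffer by tag, then hand it back to the pool. */
	dbuf = lpfc_sli_get_buff(phba, pring, irsp->un.ulpWord[3]);
	if (dbuf)
		lpfc_in_buf_free(phba, dbuf);
}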
James Smart3772a992009-05-22 14:50:54 -04002503/**
2504 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2505 * @phba: Pointer to HBA context object.
2506 * @pring: Pointer to driver SLI ring object.
2507 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2508 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2509 * @fch_type: the type for the first frame of the sequence.
2510 *
2511 * This function is called with no lock held. This function uses the r_ctl and
2512 * type of the received sequence to find the correct callback function to call
2513 * to process the sequence.
2514 **/
2515static int
2516lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2517 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2518 uint32_t fch_type)
2519{
2520 int i;
2521
James Smartf358dd02017-02-12 13:52:34 -08002522 switch (fch_type) {
2523 case FC_TYPE_NVME:
James Smartd613b6a2017-02-12 13:52:37 -08002524 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
James Smartf358dd02017-02-12 13:52:34 -08002525 return 1;
2526 default:
2527 break;
2528 }
2529
James Smart3772a992009-05-22 14:50:54 -04002530 /* Unsolicited responses */
2531 if (pring->prt[0].profile) {
2532 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2533 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2534 saveq);
2535 return 1;
2536 }
2537 /* We must search, based on rctl / type
2538 for the right routine */
2539 for (i = 0; i < pring->num_mask; i++) {
2540 if ((pring->prt[i].rctl == fch_r_ctl) &&
2541 (pring->prt[i].type == fch_type)) {
2542 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2543 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2544 (phba, pring, saveq);
2545 return 1;
2546 }
2547 }
2548 return 0;
2549}
James Smarte59058c2008-08-24 21:49:00 -04002550
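/*
 * Sketch of how a ring's unsolicited-event table is expected to be populated
 * so that the rctl/type match above finds a handler. The values mirror the
 * ELS ring setup done elsewhere in this file; treat this as an illustration
 * of the data structure, not the authoritative init path.
 */
static void __maybe_unused
lpfc_example_register_unsol_handler(struct lpfc_sli_ring *pring)
{
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* 0 = match on rctl/type below */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
}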
2551/**
James Smart3621a712009-04-06 18:47:14 -04002552 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
James Smarte59058c2008-08-24 21:49:00 -04002553 * @phba: Pointer to HBA context object.
2554 * @pring: Pointer to driver SLI ring object.
2555 * @saveq: Pointer to the unsolicited iocb.
2556 *
2557 * This function is called with no lock held by the ring event handler
2558 * when there is an unsolicited iocb posted to the response ring by the
2559 * firmware. This function gets the buffer associated with the iocbs
2560 * and calls the event handler for the ring. This function handles both
2561 * qring buffers and hbq buffers.
 2562 * When the function returns 1, the caller can free the iocb object;
 2563 * otherwise, upper layer functions will free the iocb objects.
2564 **/
dea31012005-04-17 16:05:31 -05002565static int
2566lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2567 struct lpfc_iocbq *saveq)
2568{
2569 IOCB_t * irsp;
2570 WORD5 * w5p;
2571 uint32_t Rctl, Type;
James Smart76bb24e2007-10-27 13:38:00 -04002572 struct lpfc_iocbq *iocbq;
James Smart3163f722008-02-08 18:50:25 -05002573 struct lpfc_dmabuf *dmzbuf;
dea31012005-04-17 16:05:31 -05002574
dea31012005-04-17 16:05:31 -05002575 irsp = &(saveq->iocb);
James Smart57127f12007-10-27 13:37:05 -04002576
2577 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2578 if (pring->lpfc_sli_rcv_async_status)
2579 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2580 else
2581 lpfc_printf_log(phba,
2582 KERN_WARNING,
2583 LOG_SLI,
2584 "0316 Ring %d handler: unexpected "
2585 "ASYNC_STATUS iocb received evt_code "
2586 "0x%x\n",
2587 pring->ringno,
2588 irsp->un.asyncstat.evt_code);
2589 return 1;
2590 }
2591
James Smart3163f722008-02-08 18:50:25 -05002592 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2593 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2594 if (irsp->ulpBdeCount > 0) {
2595 dmzbuf = lpfc_sli_get_buff(phba, pring,
2596 irsp->un.ulpWord[3]);
2597 lpfc_in_buf_free(phba, dmzbuf);
2598 }
2599
2600 if (irsp->ulpBdeCount > 1) {
2601 dmzbuf = lpfc_sli_get_buff(phba, pring,
2602 irsp->unsli3.sli3Words[3]);
2603 lpfc_in_buf_free(phba, dmzbuf);
2604 }
2605
2606 if (irsp->ulpBdeCount > 2) {
2607 dmzbuf = lpfc_sli_get_buff(phba, pring,
2608 irsp->unsli3.sli3Words[7]);
2609 lpfc_in_buf_free(phba, dmzbuf);
2610 }
2611
2612 return 1;
2613 }
2614
James Smart92d7f7b2007-06-17 19:56:38 -05002615 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
James Smart76bb24e2007-10-27 13:38:00 -04002616 if (irsp->ulpBdeCount != 0) {
2617 saveq->context2 = lpfc_sli_get_buff(phba, pring,
James Smart92d7f7b2007-06-17 19:56:38 -05002618 irsp->un.ulpWord[3]);
James Smart76bb24e2007-10-27 13:38:00 -04002619 if (!saveq->context2)
2620 lpfc_printf_log(phba,
2621 KERN_ERR,
2622 LOG_SLI,
2623 "0341 Ring %d Cannot find buffer for "
2624 "an unsolicited iocb. tag 0x%x\n",
2625 pring->ringno,
2626 irsp->un.ulpWord[3]);
James Smart76bb24e2007-10-27 13:38:00 -04002627 }
2628 if (irsp->ulpBdeCount == 2) {
2629 saveq->context3 = lpfc_sli_get_buff(phba, pring,
James Smart51ef4c22007-08-02 11:10:31 -04002630 irsp->unsli3.sli3Words[7]);
James Smart76bb24e2007-10-27 13:38:00 -04002631 if (!saveq->context3)
2632 lpfc_printf_log(phba,
2633 KERN_ERR,
2634 LOG_SLI,
2635 "0342 Ring %d Cannot find buffer for an"
2636 " unsolicited iocb. tag 0x%x\n",
2637 pring->ringno,
2638 irsp->unsli3.sli3Words[7]);
2639 }
2640 list_for_each_entry(iocbq, &saveq->list, list) {
James Smart76bb24e2007-10-27 13:38:00 -04002641 irsp = &(iocbq->iocb);
James Smart76bb24e2007-10-27 13:38:00 -04002642 if (irsp->ulpBdeCount != 0) {
2643 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2644 irsp->un.ulpWord[3]);
James Smart9c2face2008-01-11 01:53:18 -05002645 if (!iocbq->context2)
James Smart76bb24e2007-10-27 13:38:00 -04002646 lpfc_printf_log(phba,
2647 KERN_ERR,
2648 LOG_SLI,
2649 "0343 Ring %d Cannot find "
2650 "buffer for an unsolicited iocb"
2651 ". tag 0x%x\n", pring->ringno,
2652 irsp->un.ulpWord[3]);
2653 }
2654 if (irsp->ulpBdeCount == 2) {
2655 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2656 irsp->unsli3.sli3Words[7]);
James Smart9c2face2008-01-11 01:53:18 -05002657 if (!iocbq->context3)
James Smart76bb24e2007-10-27 13:38:00 -04002658 lpfc_printf_log(phba,
2659 KERN_ERR,
2660 LOG_SLI,
2661 "0344 Ring %d Cannot find "
2662 "buffer for an unsolicited "
2663 "iocb. tag 0x%x\n",
2664 pring->ringno,
2665 irsp->unsli3.sli3Words[7]);
2666 }
2667 }
James Smart92d7f7b2007-06-17 19:56:38 -05002668 }
James Smart9c2face2008-01-11 01:53:18 -05002669 if (irsp->ulpBdeCount != 0 &&
2670 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2671 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2672 int found = 0;
2673
2674 /* search continue save q for same XRI */
2675 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
James Smart7851fe22011-07-22 18:36:52 -04002676 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2677 saveq->iocb.unsli3.rcvsli3.ox_id) {
James Smart9c2face2008-01-11 01:53:18 -05002678 list_add_tail(&saveq->list, &iocbq->list);
2679 found = 1;
2680 break;
2681 }
2682 }
2683 if (!found)
2684 list_add_tail(&saveq->clist,
2685 &pring->iocb_continue_saveq);
2686 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2687 list_del_init(&iocbq->clist);
2688 saveq = iocbq;
2689 irsp = &(saveq->iocb);
2690 } else
2691 return 0;
2692 }
2693 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2694 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2695 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
James Smart6a9c52c2009-10-02 15:16:51 -04002696 Rctl = FC_RCTL_ELS_REQ;
2697 Type = FC_TYPE_ELS;
James Smart9c2face2008-01-11 01:53:18 -05002698 } else {
2699 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2700 Rctl = w5p->hcsw.Rctl;
2701 Type = w5p->hcsw.Type;
2702
2703 /* Firmware Workaround */
2704 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2705 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2706 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
James Smart6a9c52c2009-10-02 15:16:51 -04002707 Rctl = FC_RCTL_ELS_REQ;
2708 Type = FC_TYPE_ELS;
James Smart9c2face2008-01-11 01:53:18 -05002709 w5p->hcsw.Rctl = Rctl;
2710 w5p->hcsw.Type = Type;
2711 }
2712 }
James Smart92d7f7b2007-06-17 19:56:38 -05002713
James Smart3772a992009-05-22 14:50:54 -04002714 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
James Smart92d7f7b2007-06-17 19:56:38 -05002715 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002716 "0313 Ring %d handler: unexpected Rctl x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05002717 "Type x%x received\n",
James Smarte8b62012007-08-02 11:10:09 -04002718 pring->ringno, Rctl, Type);
James Smart3772a992009-05-22 14:50:54 -04002719
James Smart92d7f7b2007-06-17 19:56:38 -05002720 return 1;
dea31012005-04-17 16:05:31 -05002721}
2722
James Smarte59058c2008-08-24 21:49:00 -04002723/**
James Smart3621a712009-04-06 18:47:14 -04002724 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
James Smarte59058c2008-08-24 21:49:00 -04002725 * @phba: Pointer to HBA context object.
2726 * @pring: Pointer to driver SLI ring object.
2727 * @prspiocb: Pointer to response iocb object.
2728 *
2729 * This function looks up the iocb_lookup table to get the command iocb
2730 * corresponding to the given response iocb using the iotag of the
2731 * response iocb. This function is called with the hbalock held.
 2732 * This function returns the command iocb object if it finds the command
 2733 * iocb; otherwise it returns NULL.
2734 **/
dea31012005-04-17 16:05:31 -05002735static struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05002736lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2737 struct lpfc_sli_ring *pring,
2738 struct lpfc_iocbq *prspiocb)
dea31012005-04-17 16:05:31 -05002739{
dea31012005-04-17 16:05:31 -05002740 struct lpfc_iocbq *cmd_iocb = NULL;
2741 uint16_t iotag;
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002742 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05002743
James Bottomley604a3e32005-10-29 10:28:33 -05002744 iotag = prspiocb->iocb.ulpIoTag;
dea31012005-04-17 16:05:31 -05002745
James Bottomley604a3e32005-10-29 10:28:33 -05002746 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2747 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart4f2e66c2012-05-09 21:17:07 -04002748 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
James Smart89533e92016-10-13 15:06:15 -07002749 /* remove from txcmpl queue list */
2750 list_del_init(&cmd_iocb->list);
James Smart4f2e66c2012-05-09 21:17:07 -04002751 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smart89533e92016-10-13 15:06:15 -07002752 return cmd_iocb;
James Smart2a9bf3d2010-06-07 15:24:45 -04002753 }
dea31012005-04-17 16:05:31 -05002754 }
2755
dea31012005-04-17 16:05:31 -05002756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart89533e92016-10-13 15:06:15 -07002757 "0317 iotag x%x is out of "
James Bottomley604a3e32005-10-29 10:28:33 -05002758 "range: max iotag x%x wd0 x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04002759 iotag, phba->sli.last_iotag,
James Bottomley604a3e32005-10-29 10:28:33 -05002760 *(((uint32_t *) &prspiocb->iocb) + 7));
dea31012005-04-17 16:05:31 -05002761 return NULL;
2762}
2763
James Smarte59058c2008-08-24 21:49:00 -04002764/**
James Smart3772a992009-05-22 14:50:54 -04002765 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2766 * @phba: Pointer to HBA context object.
2767 * @pring: Pointer to driver SLI ring object.
2768 * @iotag: IOCB tag.
2769 *
2770 * This function looks up the iocb_lookup table to get the command iocb
2771 * corresponding to the given iotag. This function is called with the
2772 * hbalock held.
 2773 * This function returns the command iocb object if it finds the command
 2774 * iocb; otherwise it returns NULL.
2775 **/
2776static struct lpfc_iocbq *
2777lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2778 struct lpfc_sli_ring *pring, uint16_t iotag)
2779{
James Smart895427b2017-02-12 13:52:30 -08002780 struct lpfc_iocbq *cmd_iocb = NULL;
James Smart3772a992009-05-22 14:50:54 -04002781
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002782 lockdep_assert_held(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04002783 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2784 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart4f2e66c2012-05-09 21:17:07 -04002785 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2786 /* remove from txcmpl queue list */
2787 list_del_init(&cmd_iocb->list);
2788 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smart4f2e66c2012-05-09 21:17:07 -04002789 return cmd_iocb;
James Smart2a9bf3d2010-06-07 15:24:45 -04002790 }
James Smart3772a992009-05-22 14:50:54 -04002791 }
James Smart89533e92016-10-13 15:06:15 -07002792
James Smart3772a992009-05-22 14:50:54 -04002793 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08002794 "0372 iotag x%x lookup error: max iotag (x%x) "
2795 "iocb_flag x%x\n",
2796 iotag, phba->sli.last_iotag,
2797 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
James Smart3772a992009-05-22 14:50:54 -04002798 return NULL;
2799}
2800
2801/**
James Smart3621a712009-04-06 18:47:14 -04002802 * lpfc_sli_process_sol_iocb - process solicited iocb completion
James Smarte59058c2008-08-24 21:49:00 -04002803 * @phba: Pointer to HBA context object.
2804 * @pring: Pointer to driver SLI ring object.
2805 * @saveq: Pointer to the response iocb to be processed.
2806 *
2807 * This function is called by the ring event handler for non-fcp
2808 * rings when there is a new response iocb in the response ring.
2809 * The caller is not required to hold any locks. This function
2810 * gets the command iocb associated with the response iocb and
2811 * calls the completion handler for the command iocb. If there
 2812 * is no completion handler, the function will free the resources
 2813 * associated with the command iocb. If the response iocb is for
2814 * an already aborted command iocb, the status of the completion
2815 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2816 * This function always returns 1.
2817 **/
dea31012005-04-17 16:05:31 -05002818static int
James Smart2e0fef82007-06-17 19:56:36 -05002819lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea31012005-04-17 16:05:31 -05002820 struct lpfc_iocbq *saveq)
2821{
James Smart2e0fef82007-06-17 19:56:36 -05002822 struct lpfc_iocbq *cmdiocbp;
dea31012005-04-17 16:05:31 -05002823 int rc = 1;
2824 unsigned long iflag;
2825
2826 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
James Smart2e0fef82007-06-17 19:56:36 -05002827 spin_lock_irqsave(&phba->hbalock, iflag);
James Bottomley604a3e32005-10-29 10:28:33 -05002828 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
James Smart2e0fef82007-06-17 19:56:36 -05002829 spin_unlock_irqrestore(&phba->hbalock, iflag);
2830
dea31012005-04-17 16:05:31 -05002831 if (cmdiocbp) {
2832 if (cmdiocbp->iocb_cmpl) {
2833 /*
James Smartea2151b2008-09-07 11:52:10 -04002834 * If an ELS command failed send an event to mgmt
2835 * application.
2836 */
2837 if (saveq->iocb.ulpStatus &&
2838 (pring->ringno == LPFC_ELS_RING) &&
2839 (cmdiocbp->iocb.ulpCommand ==
2840 CMD_ELS_REQUEST64_CR))
2841 lpfc_send_els_failure_event(phba,
2842 cmdiocbp, saveq);
2843
2844 /*
dea31012005-04-17 16:05:31 -05002845 * Post all ELS completions to the worker thread.
2846 * All other are passed to the completion callback.
2847 */
2848 if (pring->ringno == LPFC_ELS_RING) {
James Smart341af102010-01-26 23:07:37 -05002849 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2850 (cmdiocbp->iocb_flag &
2851 LPFC_DRIVER_ABORTED)) {
2852 spin_lock_irqsave(&phba->hbalock,
2853 iflag);
James Smart07951072007-04-25 09:51:38 -04002854 cmdiocbp->iocb_flag &=
2855 ~LPFC_DRIVER_ABORTED;
James Smart341af102010-01-26 23:07:37 -05002856 spin_unlock_irqrestore(&phba->hbalock,
2857 iflag);
James Smart07951072007-04-25 09:51:38 -04002858 saveq->iocb.ulpStatus =
2859 IOSTAT_LOCAL_REJECT;
2860 saveq->iocb.un.ulpWord[4] =
2861 IOERR_SLI_ABORTED;
James Smart0ff10d42008-01-11 01:52:36 -05002862
2863 /* Firmware could still be in progress
2864 * of DMAing payload, so don't free data
2865 * buffer till after a hbeat.
2866 */
James Smart341af102010-01-26 23:07:37 -05002867 spin_lock_irqsave(&phba->hbalock,
2868 iflag);
James Smart0ff10d42008-01-11 01:52:36 -05002869 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
James Smart341af102010-01-26 23:07:37 -05002870 spin_unlock_irqrestore(&phba->hbalock,
2871 iflag);
2872 }
James Smart0f65ff62010-02-26 14:14:23 -05002873 if (phba->sli_rev == LPFC_SLI_REV4) {
2874 if (saveq->iocb_flag &
2875 LPFC_EXCHANGE_BUSY) {
2876 /* Set cmdiocb flag for the
2877 * exchange busy so sgl (xri)
2878 * will not be released until
2879 * the abort xri is received
2880 * from hba.
2881 */
2882 spin_lock_irqsave(
2883 &phba->hbalock, iflag);
2884 cmdiocbp->iocb_flag |=
2885 LPFC_EXCHANGE_BUSY;
2886 spin_unlock_irqrestore(
2887 &phba->hbalock, iflag);
2888 }
2889 if (cmdiocbp->iocb_flag &
2890 LPFC_DRIVER_ABORTED) {
2891 /*
2892 * Clear LPFC_DRIVER_ABORTED
2893 * bit in case it was driver
2894 * initiated abort.
2895 */
2896 spin_lock_irqsave(
2897 &phba->hbalock, iflag);
2898 cmdiocbp->iocb_flag &=
2899 ~LPFC_DRIVER_ABORTED;
2900 spin_unlock_irqrestore(
2901 &phba->hbalock, iflag);
2902 cmdiocbp->iocb.ulpStatus =
2903 IOSTAT_LOCAL_REJECT;
2904 cmdiocbp->iocb.un.ulpWord[4] =
2905 IOERR_ABORT_REQUESTED;
2906 /*
2907 * For SLI4, irsiocb contains
2908 * NO_XRI in sli_xritag, it
2909 * shall not affect releasing
2910 * sgl (xri) process.
2911 */
2912 saveq->iocb.ulpStatus =
2913 IOSTAT_LOCAL_REJECT;
2914 saveq->iocb.un.ulpWord[4] =
2915 IOERR_SLI_ABORTED;
2916 spin_lock_irqsave(
2917 &phba->hbalock, iflag);
2918 saveq->iocb_flag |=
2919 LPFC_DELAY_MEM_FREE;
2920 spin_unlock_irqrestore(
2921 &phba->hbalock, iflag);
2922 }
James Smart07951072007-04-25 09:51:38 -04002923 }
dea31012005-04-17 16:05:31 -05002924 }
James Smart2e0fef82007-06-17 19:56:36 -05002925 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
James Bottomley604a3e32005-10-29 10:28:33 -05002926 } else
2927 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea31012005-04-17 16:05:31 -05002928 } else {
2929 /*
2930 * Unknown initiating command based on the response iotag.
2931 * This could be the case on the ELS ring because of
2932 * lpfc_els_abort().
2933 */
2934 if (pring->ringno != LPFC_ELS_RING) {
2935 /*
2936 * Ring <ringno> handler: unexpected completion IoTag
2937 * <IoTag>
2938 */
James Smarta257bf92009-04-06 18:48:10 -04002939 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002940 "0322 Ring %d handler: "
2941 "unexpected completion IoTag x%x "
2942 "Data: x%x x%x x%x x%x\n",
2943 pring->ringno,
2944 saveq->iocb.ulpIoTag,
2945 saveq->iocb.ulpStatus,
2946 saveq->iocb.un.ulpWord[4],
2947 saveq->iocb.ulpCommand,
2948 saveq->iocb.ulpContext);
dea31012005-04-17 16:05:31 -05002949 }
2950 }
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04002951
dea31012005-04-17 16:05:31 -05002952 return rc;
2953}
2954
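/*
 * Sketch of the completion-callback shape that lpfc_sli_process_sol_iocb()
 * above invokes via (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, saveq). The
 * handler name lpfc_example_iocb_cmpl() is hypothetical; real handlers
 * (for example lpfc_cmpl_els_cmd) do protocol-specific work before
 * releasing the command iocb.
 */
static void __maybe_unused
lpfc_example_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"example cmpl: iotag x%x status x%x wd4 x%x\n",
				irsp->ulpIoTag, irsp->ulpStatus,
				irsp->un.ulpWord[4]);

	/* Return the command iocb to the driver's free list. */
	lpfc_sli_release_iocbq(phba, cmdiocb);
}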
James Smarte59058c2008-08-24 21:49:00 -04002955/**
James Smart3621a712009-04-06 18:47:14 -04002956 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
James Smarte59058c2008-08-24 21:49:00 -04002957 * @phba: Pointer to HBA context object.
2958 * @pring: Pointer to driver SLI ring object.
2959 *
 2960 * This function is called from the iocb ring event handlers when the
 2961 * put pointer is ahead of the get pointer for a ring. This function signals
 2962 * an error attention condition to the worker thread, and the worker
 2963 * thread will transition the HBA to the offline state.
2964 **/
James Smart2e0fef82007-06-17 19:56:36 -05002965static void
2966lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002967{
James Smart34b02dc2008-08-24 21:49:55 -04002968 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002969 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02002970 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002971 * rsp ring <portRspMax>
2972 */
2973 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002974 "0312 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02002975 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04002976 pring->ringno, le32_to_cpu(pgp->rspPutInx),
James Smart7e56aa22012-08-03 12:35:34 -04002977 pring->sli.sli3.numRiocb);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002978
James Smart2e0fef82007-06-17 19:56:36 -05002979 phba->link_state = LPFC_HBA_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002980
2981 /*
2982 * All error attention handlers are posted to
2983 * worker thread
2984 */
2985 phba->work_ha |= HA_ERATT;
2986 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05002987
James Smart5e9d9b82008-06-14 22:52:53 -04002988 lpfc_worker_wake_up(phba);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002989
2990 return;
2991}
2992
James Smarte59058c2008-08-24 21:49:00 -04002993/**
James Smart3621a712009-04-06 18:47:14 -04002994 * lpfc_poll_eratt - Error attention polling timer timeout handler
James Smart93996272008-08-24 21:50:30 -04002995 * @ptr: Pointer to address of HBA context object.
2996 *
2997 * This function is invoked by the Error Attention polling timer when the
2998 * timer times out. It will check the SLI Error Attention register for
2999 * possible attention events. If so, it will post an Error Attention event
 3000 * and wake up the worker thread to process it. Otherwise, it will set up the
3001 * Error Attention polling timer for the next poll.
3002 **/
3003void lpfc_poll_eratt(unsigned long ptr)
3004{
3005 struct lpfc_hba *phba;
James Smarteb016562014-09-03 12:58:06 -04003006 uint32_t eratt = 0;
James Smartaa6fbb72012-08-03 12:36:03 -04003007 uint64_t sli_intr, cnt;
James Smart93996272008-08-24 21:50:30 -04003008
3009 phba = (struct lpfc_hba *)ptr;
3010
James Smartaa6fbb72012-08-03 12:36:03 -04003011 /* Here we will also keep track of interrupts per sec of the hba */
3012 sli_intr = phba->sli.slistat.sli_intr;
3013
3014 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3015 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3016 sli_intr);
3017 else
3018 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3019
James Smart65791f12016-07-06 12:35:56 -07003020 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3021 do_div(cnt, phba->eratt_poll_interval);
James Smartaa6fbb72012-08-03 12:36:03 -04003022 phba->sli.slistat.sli_ips = cnt;
3023
3024 phba->sli.slistat.sli_prev_intr = sli_intr;
3025
James Smart93996272008-08-24 21:50:30 -04003026 /* Check chip HA register for error event */
3027 eratt = lpfc_sli_check_eratt(phba);
3028
3029 if (eratt)
3030 /* Tell the worker thread there is work to do */
3031 lpfc_worker_wake_up(phba);
3032 else
3033 /* Restart the timer for next eratt poll */
James Smart256ec0d2013-04-17 20:14:58 -04003034 mod_timer(&phba->eratt_poll,
3035 jiffies +
James Smart65791f12016-07-06 12:35:56 -07003036 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
James Smart93996272008-08-24 21:50:30 -04003037 return;
3038}
3039
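/*
 * Sketch, under assumption, of how the error-attention polling timer that
 * drives lpfc_poll_eratt() above is typically armed; the real setup lives in
 * the HBA initialization code, and lpfc_example_start_eratt_poll() is a
 * hypothetical helper shown only to illustrate the timer plumbing.
 */
static void __maybe_unused
lpfc_example_start_eratt_poll(struct lpfc_hba *phba)
{
	/* Route timer expiry to lpfc_poll_eratt() with phba as its argument. */
	setup_timer(&phba->eratt_poll, lpfc_poll_eratt, (unsigned long)phba);

	/* First expiry after eratt_poll_interval seconds; the handler re-arms. */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
}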
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003040
James Smarte59058c2008-08-24 21:49:00 -04003041/**
James Smart3621a712009-04-06 18:47:14 -04003042 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
James Smarte59058c2008-08-24 21:49:00 -04003043 * @phba: Pointer to HBA context object.
3044 * @pring: Pointer to driver SLI ring object.
3045 * @mask: Host attention register mask for this ring.
3046 *
3047 * This function is called from the interrupt context when there is a ring
3048 * event for the fcp ring. The caller does not hold any lock.
 3049 * The function processes each response iocb in the response ring until it
 3050 * finds an iocb with the LE bit set, chaining all the iocbs up to that iocb.
 3051 * The function will call the completion handler of the command iocb
 3052 * if the response iocb indicates a completion for a command iocb or it is
 3053 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
 3054 * if this is an unsolicited iocb.
 3055 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 3056 * to check it explicitly.
3057 */
3058int
James Smart2e0fef82007-06-17 19:56:36 -05003059lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3060 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05003061{
James Smart34b02dc2008-08-24 21:49:55 -04003062 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea31012005-04-17 16:05:31 -05003063 IOCB_t *irsp = NULL;
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003064 IOCB_t *entry = NULL;
dea31012005-04-17 16:05:31 -05003065 struct lpfc_iocbq *cmdiocbq = NULL;
3066 struct lpfc_iocbq rspiocbq;
dea31012005-04-17 16:05:31 -05003067 uint32_t status;
3068 uint32_t portRspPut, portRspMax;
3069 int rc = 1;
3070 lpfc_iocb_type type;
3071 unsigned long iflag;
3072 uint32_t rsp_cmpl = 0;
dea31012005-04-17 16:05:31 -05003073
James Smart2e0fef82007-06-17 19:56:36 -05003074 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003075 pring->stats.iocb_event++;
3076
dea31012005-04-17 16:05:31 -05003077 /*
3078 * The next available response entry should never exceed the maximum
3079 * entries. If it does, treat it as an adapter hardware error.
3080 */
James Smart7e56aa22012-08-03 12:35:34 -04003081 portRspMax = pring->sli.sli3.numRiocb;
dea31012005-04-17 16:05:31 -05003082 portRspPut = le32_to_cpu(pgp->rspPutInx);
3083 if (unlikely(portRspPut >= portRspMax)) {
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003084 lpfc_sli_rsp_pointers_error(phba, pring);
James Smart2e0fef82007-06-17 19:56:36 -05003085 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003086 return 1;
3087 }
James Smart45ed1192009-10-02 15:17:02 -04003088 if (phba->fcp_ring_in_use) {
3089 spin_unlock_irqrestore(&phba->hbalock, iflag);
3090 return 1;
3091 } else
3092 phba->fcp_ring_in_use = 1;
dea31012005-04-17 16:05:31 -05003093
3094 rmb();
James Smart7e56aa22012-08-03 12:35:34 -04003095 while (pring->sli.sli3.rspidx != portRspPut) {
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003096 /*
3097 * Fetch an entry off the ring and copy it into a local data
3098 * structure. The copy involves a byte-swap since the
 3099 * network and PCI byte orders are different.
3100 */
James Smarted957682007-06-17 19:56:37 -05003101 entry = lpfc_resp_iocb(phba, pring);
James Smart858c9f62007-06-17 19:56:39 -05003102 phba->last_completion_time = jiffies;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003103
James Smart7e56aa22012-08-03 12:35:34 -04003104 if (++pring->sli.sli3.rspidx >= portRspMax)
3105 pring->sli.sli3.rspidx = 0;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003106
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003107 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3108 (uint32_t *) &rspiocbq.iocb,
James Smarted957682007-06-17 19:56:37 -05003109 phba->iocb_rsp_size);
James Smarta4bc3372006-12-02 13:34:16 -05003110 INIT_LIST_HEAD(&(rspiocbq.list));
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003111 irsp = &rspiocbq.iocb;
3112
dea31012005-04-17 16:05:31 -05003113 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3114 pring->stats.iocb_rsp++;
3115 rsp_cmpl++;
3116
3117 if (unlikely(irsp->ulpStatus)) {
James Smart92d7f7b2007-06-17 19:56:38 -05003118 /*
 3119 * If resource errors are reported from the HBA, reduce
 3120 * the queue depths of the SCSI devices.
3121 */
3122 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
James Smarte3d2b802012-08-14 14:25:43 -04003123 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3124 IOERR_NO_RESOURCES)) {
James Smart92d7f7b2007-06-17 19:56:38 -05003125 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003126 phba->lpfc_rampdown_queue_depth(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05003127 spin_lock_irqsave(&phba->hbalock, iflag);
3128 }
3129
dea31012005-04-17 16:05:31 -05003130 /* Rsp ring <ringno> error: IOCB */
3131 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003132 "0336 Rsp Ring %d error: IOCB Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05003133 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003134 pring->ringno,
James Smart92d7f7b2007-06-17 19:56:38 -05003135 irsp->un.ulpWord[0],
3136 irsp->un.ulpWord[1],
3137 irsp->un.ulpWord[2],
3138 irsp->un.ulpWord[3],
3139 irsp->un.ulpWord[4],
3140 irsp->un.ulpWord[5],
James Smartd7c255b2008-08-24 21:50:00 -04003141 *(uint32_t *)&irsp->un1,
3142 *((uint32_t *)&irsp->un1 + 1));
dea31012005-04-17 16:05:31 -05003143 }
3144
3145 switch (type) {
3146 case LPFC_ABORT_IOCB:
3147 case LPFC_SOL_IOCB:
3148 /*
3149 * Idle exchange closed via ABTS from port. No iocb
3150 * resources need to be recovered.
3151 */
3152 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
James Smartdca94792006-08-01 07:34:08 -04003153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003154 "0333 IOCB cmd 0x%x"
James Smartdca94792006-08-01 07:34:08 -04003155 " processed. Skipping"
James Smart92d7f7b2007-06-17 19:56:38 -05003156 " completion\n",
James Smartdca94792006-08-01 07:34:08 -04003157 irsp->ulpCommand);
dea31012005-04-17 16:05:31 -05003158 break;
3159 }
3160
James Bottomley604a3e32005-10-29 10:28:33 -05003161 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3162 &rspiocbq);
James Smart0f65ff62010-02-26 14:14:23 -05003163 if (unlikely(!cmdiocbq))
3164 break;
3165 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3166 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3167 if (cmdiocbq->iocb_cmpl) {
3168 spin_unlock_irqrestore(&phba->hbalock, iflag);
3169 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3170 &rspiocbq);
3171 spin_lock_irqsave(&phba->hbalock, iflag);
3172 }
dea31012005-04-17 16:05:31 -05003173 break;
James Smarta4bc3372006-12-02 13:34:16 -05003174 case LPFC_UNSOL_IOCB:
James Smart2e0fef82007-06-17 19:56:36 -05003175 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05003176 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
James Smart2e0fef82007-06-17 19:56:36 -05003177 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05003178 break;
dea31012005-04-17 16:05:31 -05003179 default:
3180 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3181 char adaptermsg[LPFC_MAX_ADPTMSG];
3182 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3183 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3184 MAX_MSG_DATA);
Joe Perches898eb712007-10-18 03:06:30 -07003185 dev_warn(&((phba->pcidev)->dev),
3186 "lpfc%d: %s\n",
dea31012005-04-17 16:05:31 -05003187 phba->brd_no, adaptermsg);
3188 } else {
3189 /* Unknown IOCB command */
3190 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003191 "0334 Unknown IOCB command "
James Smart92d7f7b2007-06-17 19:56:38 -05003192 "Data: x%x, x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003193 type, irsp->ulpCommand,
James Smart92d7f7b2007-06-17 19:56:38 -05003194 irsp->ulpStatus,
3195 irsp->ulpIoTag,
3196 irsp->ulpContext);
dea31012005-04-17 16:05:31 -05003197 }
3198 break;
3199 }
3200
3201 /*
3202 * The response IOCB has been processed. Update the ring
3203 * pointer in SLIM. If the port response put pointer has not
3204 * been updated, sync the pgp->rspPutInx and fetch the new port
3205 * response put pointer.
3206 */
James Smart7e56aa22012-08-03 12:35:34 -04003207 writel(pring->sli.sli3.rspidx,
3208 &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05003209
James Smart7e56aa22012-08-03 12:35:34 -04003210 if (pring->sli.sli3.rspidx == portRspPut)
dea31012005-04-17 16:05:31 -05003211 portRspPut = le32_to_cpu(pgp->rspPutInx);
3212 }
3213
3214 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3215 pring->stats.iocb_rsp_full++;
3216 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3217 writel(status, phba->CAregaddr);
3218 readl(phba->CAregaddr);
3219 }
3220 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3221 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3222 pring->stats.iocb_cmd_empty++;
3223
3224 /* Force update of the local copy of cmdGetInx */
James Smart7e56aa22012-08-03 12:35:34 -04003225 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05003226 lpfc_sli_resume_iocb(phba, pring);
3227
3228 if ((pring->lpfc_sli_cmd_available))
3229 (pring->lpfc_sli_cmd_available) (phba, pring);
3230
3231 }
3232
James Smart45ed1192009-10-02 15:17:02 -04003233 phba->fcp_ring_in_use = 0;
James Smart2e0fef82007-06-17 19:56:36 -05003234 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003235 return rc;
3236}
3237
James Smarte59058c2008-08-24 21:49:00 -04003238/**
James Smart3772a992009-05-22 14:50:54 -04003239 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3240 * @phba: Pointer to HBA context object.
3241 * @pring: Pointer to driver SLI ring object.
3242 * @rspiocbp: Pointer to driver response IOCB object.
3243 *
3244 * This function is called from the worker thread when there is a slow-path
3245 * response IOCB to process. This function chains all the response iocbs until
 3246 * seeing the iocb with the LE bit set. The function calls
 3247 * lpfc_sli_process_sol_iocb if the response iocb indicates the
 3248 * completion of a command iocb, and calls
 3249 * lpfc_sli_process_unsol_iocb if this is an unsolicited iocb.
 3250 * The function frees the resources or calls the completion handler if this
 3251 * iocb is an abort completion. The function returns NULL when the response
 3252 * iocb has the LE bit set and all the chained iocbs are processed; otherwise
 3253 * this function chains the iocb onto the iocb_continueq and returns the
 3254 * response iocb passed in.
3255 **/
3256static struct lpfc_iocbq *
3257lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3258 struct lpfc_iocbq *rspiocbp)
3259{
3260 struct lpfc_iocbq *saveq;
3261 struct lpfc_iocbq *cmdiocbp;
3262 struct lpfc_iocbq *next_iocb;
3263 IOCB_t *irsp = NULL;
3264 uint32_t free_saveq;
3265 uint8_t iocb_cmd_type;
3266 lpfc_iocb_type type;
3267 unsigned long iflag;
3268 int rc;
3269
3270 spin_lock_irqsave(&phba->hbalock, iflag);
 3271 /* First add the response iocb to the iocb_continueq list */
3272 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3273 pring->iocb_continueq_cnt++;
3274
Justin P. Mattock70f23fd2011-05-10 10:16:21 +02003275 /* Now, determine whether the list is completed for processing */
James Smart3772a992009-05-22 14:50:54 -04003276 irsp = &rspiocbp->iocb;
3277 if (irsp->ulpLe) {
3278 /*
3279 * By default, the driver expects to free all resources
3280 * associated with this iocb completion.
3281 */
3282 free_saveq = 1;
3283 saveq = list_get_first(&pring->iocb_continueq,
3284 struct lpfc_iocbq, list);
3285 irsp = &(saveq->iocb);
3286 list_del_init(&pring->iocb_continueq);
3287 pring->iocb_continueq_cnt = 0;
3288
3289 pring->stats.iocb_rsp++;
3290
3291 /*
 3292 * If resource errors are reported from the HBA, reduce
 3293 * the queue depths of the SCSI devices.
3294 */
3295 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
James Smarte3d2b802012-08-14 14:25:43 -04003296 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3297 IOERR_NO_RESOURCES)) {
James Smart3772a992009-05-22 14:50:54 -04003298 spin_unlock_irqrestore(&phba->hbalock, iflag);
3299 phba->lpfc_rampdown_queue_depth(phba);
3300 spin_lock_irqsave(&phba->hbalock, iflag);
3301 }
3302
3303 if (irsp->ulpStatus) {
3304 /* Rsp ring <ringno> error: IOCB */
3305 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3306 "0328 Rsp Ring %d error: "
3307 "IOCB Data: "
3308 "x%x x%x x%x x%x "
3309 "x%x x%x x%x x%x "
3310 "x%x x%x x%x x%x "
3311 "x%x x%x x%x x%x\n",
3312 pring->ringno,
3313 irsp->un.ulpWord[0],
3314 irsp->un.ulpWord[1],
3315 irsp->un.ulpWord[2],
3316 irsp->un.ulpWord[3],
3317 irsp->un.ulpWord[4],
3318 irsp->un.ulpWord[5],
3319 *(((uint32_t *) irsp) + 6),
3320 *(((uint32_t *) irsp) + 7),
3321 *(((uint32_t *) irsp) + 8),
3322 *(((uint32_t *) irsp) + 9),
3323 *(((uint32_t *) irsp) + 10),
3324 *(((uint32_t *) irsp) + 11),
3325 *(((uint32_t *) irsp) + 12),
3326 *(((uint32_t *) irsp) + 13),
3327 *(((uint32_t *) irsp) + 14),
3328 *(((uint32_t *) irsp) + 15));
3329 }
3330
3331 /*
3332 * Fetch the IOCB command type and call the correct completion
3333 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3334 * get freed back to the lpfc_iocb_list by the discovery
3335 * kernel thread.
3336 */
3337 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3338 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3339 switch (type) {
3340 case LPFC_SOL_IOCB:
3341 spin_unlock_irqrestore(&phba->hbalock, iflag);
3342 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3343 spin_lock_irqsave(&phba->hbalock, iflag);
3344 break;
3345
3346 case LPFC_UNSOL_IOCB:
3347 spin_unlock_irqrestore(&phba->hbalock, iflag);
3348 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3349 spin_lock_irqsave(&phba->hbalock, iflag);
3350 if (!rc)
3351 free_saveq = 0;
3352 break;
3353
3354 case LPFC_ABORT_IOCB:
3355 cmdiocbp = NULL;
3356 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3357 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3358 saveq);
3359 if (cmdiocbp) {
3360 /* Call the specified completion routine */
3361 if (cmdiocbp->iocb_cmpl) {
3362 spin_unlock_irqrestore(&phba->hbalock,
3363 iflag);
3364 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3365 saveq);
3366 spin_lock_irqsave(&phba->hbalock,
3367 iflag);
3368 } else
3369 __lpfc_sli_release_iocbq(phba,
3370 cmdiocbp);
3371 }
3372 break;
3373
3374 case LPFC_UNKNOWN_IOCB:
3375 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3376 char adaptermsg[LPFC_MAX_ADPTMSG];
3377 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3378 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3379 MAX_MSG_DATA);
3380 dev_warn(&((phba->pcidev)->dev),
3381 "lpfc%d: %s\n",
3382 phba->brd_no, adaptermsg);
3383 } else {
3384 /* Unknown IOCB command */
3385 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3386 "0335 Unknown IOCB "
3387 "command Data: x%x "
3388 "x%x x%x x%x\n",
3389 irsp->ulpCommand,
3390 irsp->ulpStatus,
3391 irsp->ulpIoTag,
3392 irsp->ulpContext);
3393 }
3394 break;
3395 }
3396
3397 if (free_saveq) {
3398 list_for_each_entry_safe(rspiocbp, next_iocb,
3399 &saveq->list, list) {
James Smart61f35bf2013-05-31 17:03:48 -04003400 list_del_init(&rspiocbp->list);
James Smart3772a992009-05-22 14:50:54 -04003401 __lpfc_sli_release_iocbq(phba, rspiocbp);
3402 }
3403 __lpfc_sli_release_iocbq(phba, saveq);
3404 }
3405 rspiocbp = NULL;
3406 }
3407 spin_unlock_irqrestore(&phba->hbalock, iflag);
3408 return rspiocbp;
3409}
3410
3411/**
3412 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
James Smarte59058c2008-08-24 21:49:00 -04003413 * @phba: Pointer to HBA context object.
3414 * @pring: Pointer to driver SLI ring object.
3415 * @mask: Host attention register mask for this ring.
3416 *
James Smart3772a992009-05-22 14:50:54 -04003417 * This routine wraps the actual slow-path ring event processing routine,
 3418 * invoking it through the API jump table function pointer in the lpfc_hba struct.
James Smarte59058c2008-08-24 21:49:00 -04003419 **/
James Smart3772a992009-05-22 14:50:54 -04003420void
James Smart2e0fef82007-06-17 19:56:36 -05003421lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3422 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05003423{
James Smart3772a992009-05-22 14:50:54 -04003424 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3425}
3426
3427/**
3428 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3429 * @phba: Pointer to HBA context object.
3430 * @pring: Pointer to driver SLI ring object.
3431 * @mask: Host attention register mask for this ring.
3432 *
3433 * This function is called from the worker thread when there is a ring event
3434 * for non-fcp rings. The caller does not hold any lock. The function will
3435 * remove each response iocb in the response ring and calls the handle
3436 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3437 **/
3438static void
3439lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3440 struct lpfc_sli_ring *pring, uint32_t mask)
3441{
James Smart34b02dc2008-08-24 21:49:55 -04003442 struct lpfc_pgp *pgp;
dea31012005-04-17 16:05:31 -05003443 IOCB_t *entry;
3444 IOCB_t *irsp = NULL;
3445 struct lpfc_iocbq *rspiocbp = NULL;
dea31012005-04-17 16:05:31 -05003446 uint32_t portRspPut, portRspMax;
dea31012005-04-17 16:05:31 -05003447 unsigned long iflag;
James Smart3772a992009-05-22 14:50:54 -04003448 uint32_t status;
dea31012005-04-17 16:05:31 -05003449
James Smart34b02dc2008-08-24 21:49:55 -04003450 pgp = &phba->port_gp[pring->ringno];
James Smart2e0fef82007-06-17 19:56:36 -05003451 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003452 pring->stats.iocb_event++;
3453
dea31012005-04-17 16:05:31 -05003454 /*
3455 * The next available response entry should never exceed the maximum
3456 * entries. If it does, treat it as an adapter hardware error.
3457 */
James Smart7e56aa22012-08-03 12:35:34 -04003458 portRspMax = pring->sli.sli3.numRiocb;
dea31012005-04-17 16:05:31 -05003459 portRspPut = le32_to_cpu(pgp->rspPutInx);
3460 if (portRspPut >= portRspMax) {
3461 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003462 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea31012005-04-17 16:05:31 -05003463 * rsp ring <portRspMax>
3464 */
James Smarted957682007-06-17 19:56:37 -05003465 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003466 "0303 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003467 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04003468 pring->ringno, portRspPut, portRspMax);
dea31012005-04-17 16:05:31 -05003469
James Smart2e0fef82007-06-17 19:56:36 -05003470 phba->link_state = LPFC_HBA_ERROR;
3471 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003472
3473 phba->work_hs = HS_FFER3;
3474 lpfc_handle_eratt(phba);
3475
James Smart3772a992009-05-22 14:50:54 -04003476 return;
dea31012005-04-17 16:05:31 -05003477 }
3478
3479 rmb();
James Smart7e56aa22012-08-03 12:35:34 -04003480 while (pring->sli.sli3.rspidx != portRspPut) {
dea31012005-04-17 16:05:31 -05003481 /*
3482 * Build a completion list and call the appropriate handler.
3483 * The process is to get the next available response iocb, get
3484 * a free iocb from the list, copy the response data into the
3485 * free iocb, insert to the continuation list, and update the
3486 * next response index to slim. This process makes response
3487 * iocb's in the ring available to DMA as fast as possible but
3488 * pays a penalty for a copy operation. Since the iocb is
3489 * only 32 bytes, this penalty is considered small relative to
3490 * the PCI reads for register values and a slim write. When
3491 * the ulpLe field is set, the entire Command has been
3492 * received.
3493 */
James Smarted957682007-06-17 19:56:37 -05003494 entry = lpfc_resp_iocb(phba, pring);
3495
James Smart858c9f62007-06-17 19:56:39 -05003496 phba->last_completion_time = jiffies;
James Smart2e0fef82007-06-17 19:56:36 -05003497 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05003498 if (rspiocbp == NULL) {
3499 printk(KERN_ERR "%s: out of buffers! Failing "
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07003500 "completion.\n", __func__);
dea31012005-04-17 16:05:31 -05003501 break;
3502 }
3503
James Smarted957682007-06-17 19:56:37 -05003504 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3505 phba->iocb_rsp_size);
dea31012005-04-17 16:05:31 -05003506 irsp = &rspiocbp->iocb;
3507
James Smart7e56aa22012-08-03 12:35:34 -04003508 if (++pring->sli.sli3.rspidx >= portRspMax)
3509 pring->sli.sli3.rspidx = 0;
dea31012005-04-17 16:05:31 -05003510
James Smarta58cbd52007-08-02 11:09:43 -04003511 if (pring->ringno == LPFC_ELS_RING) {
3512 lpfc_debugfs_slow_ring_trc(phba,
3513 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3514 *(((uint32_t *) irsp) + 4),
3515 *(((uint32_t *) irsp) + 6),
3516 *(((uint32_t *) irsp) + 7));
3517 }
3518
James Smart7e56aa22012-08-03 12:35:34 -04003519 writel(pring->sli.sli3.rspidx,
3520 &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05003521
James Smart3772a992009-05-22 14:50:54 -04003522 spin_unlock_irqrestore(&phba->hbalock, iflag);
3523 /* Handle the response IOCB */
3524 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3525 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003526
3527 /*
3528 * If the port response put pointer has not been updated, sync
 3529 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3530 * response put pointer.
3531 */
James Smart7e56aa22012-08-03 12:35:34 -04003532 if (pring->sli.sli3.rspidx == portRspPut) {
dea31012005-04-17 16:05:31 -05003533 portRspPut = le32_to_cpu(pgp->rspPutInx);
3534 }
James Smart7e56aa22012-08-03 12:35:34 -04003535 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea31012005-04-17 16:05:31 -05003536
James Smart92d7f7b2007-06-17 19:56:38 -05003537 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea31012005-04-17 16:05:31 -05003538 /* At least one response entry has been freed */
3539 pring->stats.iocb_rsp_full++;
3540 /* SET RxRE_RSP in Chip Att register */
3541 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3542 writel(status, phba->CAregaddr);
3543 readl(phba->CAregaddr); /* flush */
3544 }
3545 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3546 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3547 pring->stats.iocb_cmd_empty++;
3548
3549 /* Force update of the local copy of cmdGetInx */
James Smart7e56aa22012-08-03 12:35:34 -04003550 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05003551 lpfc_sli_resume_iocb(phba, pring);
3552
3553 if ((pring->lpfc_sli_cmd_available))
3554 (pring->lpfc_sli_cmd_available) (phba, pring);
3555
3556 }
3557
James Smart2e0fef82007-06-17 19:56:36 -05003558 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003559 return;
dea31012005-04-17 16:05:31 -05003560}
3561
James Smarte59058c2008-08-24 21:49:00 -04003562/**
James Smart4f774512009-05-22 14:52:35 -04003563 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3564 * @phba: Pointer to HBA context object.
3565 * @pring: Pointer to driver SLI ring object.
3566 * @mask: Host attention register mask for this ring.
3567 *
3568 * This function is called from the worker thread when there is a pending
3569 * ELS response iocb on the driver internal slow-path response iocb worker
 3570 * queue. The caller does not hold any lock. The function removes each
 3571 * response iocb from the response worker queue and calls the
 3572 * response iocb handler (lpfc_sli_sp_handle_rspiocb) to process it.
3573 **/
3574static void
3575lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3576 struct lpfc_sli_ring *pring, uint32_t mask)
3577{
3578 struct lpfc_iocbq *irspiocbq;
James Smart4d9ab992009-10-02 15:16:39 -04003579 struct hbq_dmabuf *dmabuf;
3580 struct lpfc_cq_event *cq_event;
James Smart4f774512009-05-22 14:52:35 -04003581 unsigned long iflag;
3582
James Smart45ed1192009-10-02 15:17:02 -04003583 spin_lock_irqsave(&phba->hbalock, iflag);
3584 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3585 spin_unlock_irqrestore(&phba->hbalock, iflag);
3586 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
James Smart4f774512009-05-22 14:52:35 -04003587 /* Get the response iocb from the head of work queue */
3588 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart45ed1192009-10-02 15:17:02 -04003589 list_remove_head(&phba->sli4_hba.sp_queue_event,
James Smart4d9ab992009-10-02 15:16:39 -04003590 cq_event, struct lpfc_cq_event, list);
James Smart4f774512009-05-22 14:52:35 -04003591 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart4d9ab992009-10-02 15:16:39 -04003592
3593 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3594 case CQE_CODE_COMPL_WQE:
3595 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3596 cq_event);
James Smart45ed1192009-10-02 15:17:02 -04003597 /* Translate ELS WCQE to response IOCBQ */
3598 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3599 irspiocbq);
3600 if (irspiocbq)
3601 lpfc_sli_sp_handle_rspiocb(phba, pring,
3602 irspiocbq);
James Smart4d9ab992009-10-02 15:16:39 -04003603 break;
3604 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -04003605 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -04003606 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3607 cq_event);
3608 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3609 break;
3610 default:
3611 break;
3612 }
James Smart4f774512009-05-22 14:52:35 -04003613 }
3614}
3615
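/*
 * Sketch, under assumption, of how the SLI-3/SLI-4 handlers above are
 * expected to be wired into the jump table used by
 * lpfc_sli_handle_slow_ring_event(). The real assignment is done by the
 * driver's API table setup during attach; lpfc_example_setup_slow_ring_handler()
 * is a hypothetical helper shown only to illustrate the dispatch.
 */
static void __maybe_unused
lpfc_example_setup_slow_ring_handler(struct lpfc_hba *phba, int dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI-3 style HBA */
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* SLI-4 style HBA */
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		break;
	default:
		break;
	}
}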
3616/**
James Smart3621a712009-04-06 18:47:14 -04003617 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
James Smarte59058c2008-08-24 21:49:00 -04003618 * @phba: Pointer to HBA context object.
3619 * @pring: Pointer to driver SLI ring object.
3620 *
3621 * This function aborts all iocbs in the given ring and frees all the iocb
3622 * objects in txq. This function issues an abort iocb for all the iocb commands
 3623 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3624 * the return of this function. The caller is not required to hold any locks.
3625 **/
James Smart2e0fef82007-06-17 19:56:36 -05003626void
dea31012005-04-17 16:05:31 -05003627lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3628{
James Smart2534ba72007-04-25 09:52:20 -04003629 LIST_HEAD(completions);
dea31012005-04-17 16:05:31 -05003630 struct lpfc_iocbq *iocb, *next_iocb;
dea31012005-04-17 16:05:31 -05003631
James Smart92d7f7b2007-06-17 19:56:38 -05003632 if (pring->ringno == LPFC_ELS_RING) {
3633 lpfc_fabric_abort_hba(phba);
3634 }
3635
dea31012005-04-17 16:05:31 -05003636 /* Error everything on txq and txcmplq
3637 * First do the txq.
3638 */
James Smartdb55fba2014-04-04 13:52:02 -04003639 if (phba->sli_rev >= LPFC_SLI_REV4) {
3640 spin_lock_irq(&pring->ring_lock);
3641 list_splice_init(&pring->txq, &completions);
3642 pring->txq_cnt = 0;
3643 spin_unlock_irq(&pring->ring_lock);
dea31012005-04-17 16:05:31 -05003644
James Smartdb55fba2014-04-04 13:52:02 -04003645 spin_lock_irq(&phba->hbalock);
3646 /* Next issue ABTS for everything on the txcmplq */
3647 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3648 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3649 spin_unlock_irq(&phba->hbalock);
3650 } else {
3651 spin_lock_irq(&phba->hbalock);
3652 list_splice_init(&pring->txq, &completions);
3653 pring->txq_cnt = 0;
James Smart2534ba72007-04-25 09:52:20 -04003654
James Smartdb55fba2014-04-04 13:52:02 -04003655 /* Next issue ABTS for everything on the txcmplq */
3656 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3657 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3658 spin_unlock_irq(&phba->hbalock);
3659 }
James Smart2534ba72007-04-25 09:52:20 -04003660
James Smarta257bf92009-04-06 18:48:10 -04003661 /* Cancel all the IOCBs from the completions list */
3662 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3663 IOERR_SLI_ABORTED);
dea31012005-04-17 16:05:31 -05003664}
3665
James Smarte59058c2008-08-24 21:49:00 -04003666/**
James Smart895427b2017-02-12 13:52:30 -08003667 * lpfc_sli_abort_wqe_ring - Abort all wqes in the given ring
3668 * @phba: Pointer to HBA context object.
3669 * @pring: Pointer to driver SLI ring object.
3670 *
 3671 * This function aborts all outstanding wqes in the given ring by issuing
 3672 * an abort wqe for every command on the txcmplq. The wqes in the txcmplq
 3673 * are not guaranteed to complete before the return of this function. The
 3674 * caller is not required to hold any locks.
3675 **/
3676void
3677lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3678{
3679 LIST_HEAD(completions);
3680 struct lpfc_iocbq *iocb, *next_iocb;
3681
3682 if (pring->ringno == LPFC_ELS_RING)
3683 lpfc_fabric_abort_hba(phba);
3684
3685 spin_lock_irq(&phba->hbalock);
3686 /* Next issue ABTS for everything on the txcmplq */
3687 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3688 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3689 spin_unlock_irq(&phba->hbalock);
3690}
3691
3692
3693/**
James Smartdb55fba2014-04-04 13:52:02 -04003694 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3695 * @phba: Pointer to HBA context object.
3697 *
3698 * This function aborts all iocbs in FCP rings and frees all the iocb
3699 * objects in txq. This function issues an abort iocb for all the iocb commands
3700 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3701 * the return of this function. The caller is not required to hold any locks.
3702 **/
3703void
3704lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3705{
3706 struct lpfc_sli *psli = &phba->sli;
3707 struct lpfc_sli_ring *pring;
3708 uint32_t i;
3709
3710 /* Look on all the FCP Rings for the iotag */
3711 if (phba->sli_rev >= LPFC_SLI_REV4) {
3712 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
James Smart895427b2017-02-12 13:52:30 -08003713 pring = phba->sli4_hba.fcp_wq[i]->pring;
James Smartdb55fba2014-04-04 13:52:02 -04003714 lpfc_sli_abort_iocb_ring(phba, pring);
3715 }
3716 } else {
James Smart895427b2017-02-12 13:52:30 -08003717 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smartdb55fba2014-04-04 13:52:02 -04003718 lpfc_sli_abort_iocb_ring(phba, pring);
3719 }
3720}
3721
James Smart895427b2017-02-12 13:52:30 -08003722/**
3723 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3724 * @phba: Pointer to HBA context object.
3725 *
3726 * This function aborts all wqes in NVME rings. This function issues an
3727 * abort wqe for all the outstanding IO commands in the txcmplq. The wqes in
3728 * the txcmplq are not guaranteed to complete before the return of this
3729 * function. The caller is not required to hold any locks.
3730 **/
3731void
3732lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3733{
3734 struct lpfc_sli_ring *pring;
3735 uint32_t i;
3736
3737 if (phba->sli_rev < LPFC_SLI_REV4)
3738 return;
3739
3740 /* Abort all IO on each NVME ring. */
3741 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3742 pring = phba->sli4_hba.nvme_wq[i]->pring;
3743 lpfc_sli_abort_wqe_ring(phba, pring);
3744 }
3745}
3746
James Smartdb55fba2014-04-04 13:52:02 -04003747
3748/**
James Smart3621a712009-04-06 18:47:14 -04003749 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp rings
James Smarta8e497d2008-08-24 21:50:11 -04003750 * @phba: Pointer to HBA context object.
3751 *
3752 * This function flushes all iocbs in the fcp rings and frees all the iocb
3753 * objects in the txq and txcmplq. This function will not issue abort iocbs
3754 * for the iocb commands in the txcmplq; they will just be returned with
3755 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3756 * slot has been permanently disabled.
3757 **/
3758void
3759lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3760{
3761 LIST_HEAD(txq);
3762 LIST_HEAD(txcmplq);
James Smarta8e497d2008-08-24 21:50:11 -04003763 struct lpfc_sli *psli = &phba->sli;
3764 struct lpfc_sli_ring *pring;
James Smartdb55fba2014-04-04 13:52:02 -04003765 uint32_t i;
James Smarta8e497d2008-08-24 21:50:11 -04003766
3767 spin_lock_irq(&phba->hbalock);
James Smart4f2e66c2012-05-09 21:17:07 -04003768 /* Indicate the I/O queues are flushed */
3769 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
James Smarta8e497d2008-08-24 21:50:11 -04003770 spin_unlock_irq(&phba->hbalock);
3771
James Smartdb55fba2014-04-04 13:52:02 -04003772 /* Look on all the FCP Rings for the iotag */
3773 if (phba->sli_rev >= LPFC_SLI_REV4) {
3774 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
James Smart895427b2017-02-12 13:52:30 -08003775 pring = phba->sli4_hba.fcp_wq[i]->pring;
James Smarta8e497d2008-08-24 21:50:11 -04003776
James Smartdb55fba2014-04-04 13:52:02 -04003777 spin_lock_irq(&pring->ring_lock);
3778 /* Retrieve everything on txq */
3779 list_splice_init(&pring->txq, &txq);
3780 /* Retrieve everything on the txcmplq */
3781 list_splice_init(&pring->txcmplq, &txcmplq);
3782 pring->txq_cnt = 0;
3783 pring->txcmplq_cnt = 0;
3784 spin_unlock_irq(&pring->ring_lock);
3785
3786 /* Flush the txq */
3787 lpfc_sli_cancel_iocbs(phba, &txq,
3788 IOSTAT_LOCAL_REJECT,
3789 IOERR_SLI_DOWN);
3790 /* Flush the txcmpq */
3791 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3792 IOSTAT_LOCAL_REJECT,
3793 IOERR_SLI_DOWN);
3794 }
3795 } else {
James Smart895427b2017-02-12 13:52:30 -08003796 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smartdb55fba2014-04-04 13:52:02 -04003797
3798 spin_lock_irq(&phba->hbalock);
3799 /* Retrieve everything on txq */
3800 list_splice_init(&pring->txq, &txq);
3801 /* Retrieve everything on the txcmplq */
3802 list_splice_init(&pring->txcmplq, &txcmplq);
3803 pring->txq_cnt = 0;
3804 pring->txcmplq_cnt = 0;
3805 spin_unlock_irq(&phba->hbalock);
3806
3807 /* Flush the txq */
3808 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3809 IOERR_SLI_DOWN);
3810 /* Flush the txcmpq */
3811 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3812 IOERR_SLI_DOWN);
3813 }
James Smarta8e497d2008-08-24 21:50:11 -04003814}
3815
3816/**
James Smart895427b2017-02-12 13:52:30 -08003817 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
3818 * @phba: Pointer to HBA context object.
3819 *
3820 * This function flushes all wqes in the nvme rings and frees all resources
3821 * in the txcmplq. This function does not issue abort wqes for the IO
3822 * commands in txcmplq, they will just be returned with
3823 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3824 * slot has been permanently disabled.
3825 **/
3826void
3827lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
3828{
3829 LIST_HEAD(txcmplq);
3830 struct lpfc_sli_ring *pring;
3831 uint32_t i;
3832
3833 if (phba->sli_rev < LPFC_SLI_REV4)
3834 return;
3835
3836 /* Hint to other driver operations that a flush is in progress. */
3837 spin_lock_irq(&phba->hbalock);
3838 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
3839 spin_unlock_irq(&phba->hbalock);
3840
3841 /* Cycle through all NVME rings and complete each IO with
3842 * a local driver reason code. This is a flush so no
3843 * abort exchange to FW.
3844 */
3845 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3846 pring = phba->sli4_hba.nvme_wq[i]->pring;
3847
3848 /* Retrieve everything on the txcmplq */
3849 spin_lock_irq(&pring->ring_lock);
3850 list_splice_init(&pring->txcmplq, &txcmplq);
3851 pring->txcmplq_cnt = 0;
3852 spin_unlock_irq(&pring->ring_lock);
3853
3854 /* Flush the txcmplq */
3855 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3856 IOSTAT_LOCAL_REJECT,
3857 IOERR_SLI_DOWN);
3858 }
3859}
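
/*
 * Usage sketch, assuming an EEH-style teardown where the PCI slot is already
 * dead and no ABTS can go out on the wire: a caller would typically flush
 * both protocols back to back so every outstanding command is completed with
 * IOERR_SLI_DOWN.
 *
 *	lpfc_sli_flush_fcp_rings(phba);
 *	lpfc_sli_flush_nvme_rings(phba);
 */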
3860
3861/**
James Smart3772a992009-05-22 14:50:54 -04003862 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
James Smarte59058c2008-08-24 21:49:00 -04003863 * @phba: Pointer to HBA context object.
3864 * @mask: Bit mask to be checked.
3865 *
3866 * This function reads the host status register and compares it
3867 * with the provided bit mask to check if the HBA completed
3868 * the restart. This function will wait in a loop for the
3869 * HBA to complete the restart. If the HBA does not restart within
3870 * 15 iterations, the function will reset the HBA again. The
3871 * function returns 1 when the HBA fails to restart, otherwise it
3872 * returns zero.
3873 **/
James Smart3772a992009-05-22 14:50:54 -04003874static int
3875lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea31012005-04-17 16:05:31 -05003876{
Jamie Wellnitz41415862006-02-28 19:25:27 -05003877 uint32_t status;
3878 int i = 0;
3879 int retval = 0;
dea31012005-04-17 16:05:31 -05003880
Jamie Wellnitz41415862006-02-28 19:25:27 -05003881 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05003882 if (lpfc_readl(phba->HSregaddr, &status))
3883 return 1;
dea31012005-04-17 16:05:31 -05003884
Jamie Wellnitz41415862006-02-28 19:25:27 -05003885 /*
3886 * Check status register every 100ms for 5 retries, then every
3887 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3888 * every 2.5 sec for 4.
3889 * Break out of the loop if errors occurred during init.
3890 */
3891 while (((status & mask) != mask) &&
3892 !(status & HS_FFERM) &&
3893 i++ < 20) {
dea31012005-04-17 16:05:31 -05003894
Jamie Wellnitz41415862006-02-28 19:25:27 -05003895 if (i <= 5)
3896 msleep(10);
3897 else if (i <= 10)
3898 msleep(500);
3899 else
3900 msleep(2500);
dea31012005-04-17 16:05:31 -05003901
Jamie Wellnitz41415862006-02-28 19:25:27 -05003902 if (i == 15) {
James Smart2e0fef82007-06-17 19:56:36 -05003903 /* Do post */
James Smart92d7f7b2007-06-17 19:56:38 -05003904 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003905 lpfc_sli_brdrestart(phba);
3906 }
3907 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05003908 if (lpfc_readl(phba->HSregaddr, &status)) {
3909 retval = 1;
3910 break;
3911 }
dea31012005-04-17 16:05:31 -05003912 }
dea31012005-04-17 16:05:31 -05003913
Jamie Wellnitz41415862006-02-28 19:25:27 -05003914 /* Check to see if any errors occurred during init */
3915 if ((status & HS_FFERM) || (i >= 20)) {
James Smarte40a02c2010-02-26 14:13:54 -05003916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3917 "2751 Adapter failed to restart, "
3918 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3919 status,
3920 readl(phba->MBslimaddr + 0xa8),
3921 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05003922 phba->link_state = LPFC_HBA_ERROR;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003923 retval = 1;
3924 }
dea31012005-04-17 16:05:31 -05003925
Jamie Wellnitz41415862006-02-28 19:25:27 -05003926 return retval;
dea31012005-04-17 16:05:31 -05003927}
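
/*
 * Illustration only: the escalating msleep() backoff used above, factored
 * into a stand-alone poll helper. The driver open-codes this loop; the
 * helper name below is an assumption made for the sketch.
 */
static int
lpfc_example_poll_hs(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i;

	for (i = 0; i < 20; i++) {
		if (lpfc_readl(phba->HSregaddr, &status))
			return 1;	/* register read failed */
		if ((status & mask) == mask)
			return 0;	/* HBA reports ready */
		if (status & HS_FFERM)
			return 1;	/* error attention during init */
		if (i < 5)
			msleep(10);
		else if (i < 10)
			msleep(500);
		else
			msleep(2500);
	}
	return 1;			/* timed out */
}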
3928
James Smartda0436e2009-05-22 14:51:39 -04003929/**
3930 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3931 * @phba: Pointer to HBA context object.
3932 * @mask: Bit mask to be checked.
3933 *
3934 * This function checks the host status register to check if the HBA is
3935 * ready. This function will wait in a loop for the HBA to be ready.
3936 * If the HBA is not ready, the function will reset the HBA PCI
3937 * function again. The function returns 1 when the HBA fails to become
3938 * ready, otherwise it returns zero.
3939 **/
3940static int
3941lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3942{
3943 uint32_t status;
3944 int retval = 0;
3945
3946 /* Read the HBA Host Status Register */
3947 status = lpfc_sli4_post_status_check(phba);
3948
3949 if (status) {
3950 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3951 lpfc_sli_brdrestart(phba);
3952 status = lpfc_sli4_post_status_check(phba);
3953 }
3954
3955 /* Check to see if any errors occurred during init */
3956 if (status) {
3957 phba->link_state = LPFC_HBA_ERROR;
3958 retval = 1;
3959 } else
3960 phba->sli4_hba.intr_enable = 0;
3961
3962 return retval;
3963}
3964
3965/**
3966 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3967 * @phba: Pointer to HBA context object.
3968 * @mask: Bit mask to be checked.
3969 *
3970 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3971 * via the API jump table function pointer in the lpfc_hba struct.
3972 **/
3973int
3974lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3975{
3976 return phba->lpfc_sli_brdready(phba, mask);
3977}
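
/*
 * Sketch of how the per-revision readiness routine could be selected through
 * the jump table consumed by lpfc_sli_brdready() above. The driver performs
 * this assignment in its own API-table setup path; the helper name and the
 * "sli4" flag are illustrative assumptions.
 */
static inline int
lpfc_example_check_ready(struct lpfc_hba *phba, int sli4)
{
	phba->lpfc_sli_brdready = sli4 ? lpfc_sli_brdready_s4
				       : lpfc_sli_brdready_s3;

	/* SLI-3 polls for HS_FFRDY|HS_MBRDY; the SLI-4 path ignores the mask */
	return lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
}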
3978
James Smart92908312006-03-07 15:04:13 -05003979#define BARRIER_TEST_PATTERN (0xdeadbeef)
3980
James Smarte59058c2008-08-24 21:49:00 -04003981/**
James Smart3621a712009-04-06 18:47:14 -04003982 * lpfc_reset_barrier - Make HBA ready for HBA reset
James Smarte59058c2008-08-24 21:49:00 -04003983 * @phba: Pointer to HBA context object.
3984 *
James Smart1b511972011-12-13 13:23:09 -05003985 * This function is called before resetting an HBA. This function is called
3986 * with hbalock held and requests the HBA to quiesce DMAs before a reset.
James Smarte59058c2008-08-24 21:49:00 -04003987 **/
James Smart2e0fef82007-06-17 19:56:36 -05003988void lpfc_reset_barrier(struct lpfc_hba *phba)
James Smart92908312006-03-07 15:04:13 -05003989{
James Smart65a29c12006-07-06 15:50:50 -04003990 uint32_t __iomem *resp_buf;
3991 uint32_t __iomem *mbox_buf;
James Smart92908312006-03-07 15:04:13 -05003992 volatile uint32_t mbox;
James Smart9940b972011-03-11 16:06:12 -05003993 uint32_t hc_copy, ha_copy, resp_data;
James Smart92908312006-03-07 15:04:13 -05003994 int i;
3995 uint8_t hdrtype;
3996
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01003997 lockdep_assert_held(&phba->hbalock);
3998
James Smart92908312006-03-07 15:04:13 -05003999 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4000 if (hdrtype != 0x80 ||
4001 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4002 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4003 return;
4004
4005 /*
4006 * Tell the other part of the chip to suspend temporarily all
4007 * its DMA activity.
4008 */
James Smart65a29c12006-07-06 15:50:50 -04004009 resp_buf = phba->MBslimaddr;
James Smart92908312006-03-07 15:04:13 -05004010
4011 /* Disable the error attention */
James Smart9940b972011-03-11 16:06:12 -05004012 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4013 return;
James Smart92908312006-03-07 15:04:13 -05004014 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4015 readl(phba->HCregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05004016 phba->link_flag |= LS_IGNORE_ERATT;
James Smart92908312006-03-07 15:04:13 -05004017
James Smart9940b972011-03-11 16:06:12 -05004018 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4019 return;
4020 if (ha_copy & HA_ERATT) {
James Smart92908312006-03-07 15:04:13 -05004021 /* Clear Chip error bit */
4022 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05004023 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05004024 }
4025
4026 mbox = 0;
4027 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4028 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4029
4030 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
James Smart65a29c12006-07-06 15:50:50 -04004031 mbox_buf = phba->MBslimaddr;
James Smart92908312006-03-07 15:04:13 -05004032 writel(mbox, mbox_buf);
4033
James Smart9940b972011-03-11 16:06:12 -05004034 for (i = 0; i < 50; i++) {
4035 if (lpfc_readl((resp_buf + 1), &resp_data))
4036 return;
4037 if (resp_data != ~(BARRIER_TEST_PATTERN))
4038 mdelay(1);
4039 else
4040 break;
4041 }
4042 resp_data = 0;
4043 if (lpfc_readl((resp_buf + 1), &resp_data))
4044 return;
4045 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
James Smartf4b4c682009-05-22 14:53:12 -04004046 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
James Smart2e0fef82007-06-17 19:56:36 -05004047 phba->pport->stopped)
James Smart92908312006-03-07 15:04:13 -05004048 goto restore_hc;
4049 else
4050 goto clear_errat;
4051 }
4052
4053 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
James Smart9940b972011-03-11 16:06:12 -05004054 resp_data = 0;
4055 for (i = 0; i < 500; i++) {
4056 if (lpfc_readl(resp_buf, &resp_data))
4057 return;
4058 if (resp_data != mbox)
4059 mdelay(1);
4060 else
4061 break;
4062 }
James Smart92908312006-03-07 15:04:13 -05004063
4064clear_errat:
4065
James Smart9940b972011-03-11 16:06:12 -05004066 while (++i < 500) {
4067 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4068 return;
4069 if (!(ha_copy & HA_ERATT))
4070 mdelay(1);
4071 else
4072 break;
4073 }
James Smart92908312006-03-07 15:04:13 -05004074
4075 if (readl(phba->HAregaddr) & HA_ERATT) {
4076 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05004077 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05004078 }
4079
4080restore_hc:
James Smart2e0fef82007-06-17 19:56:36 -05004081 phba->link_flag &= ~LS_IGNORE_ERATT;
James Smart92908312006-03-07 15:04:13 -05004082 writel(hc_copy, phba->HCregaddr);
4083 readl(phba->HCregaddr); /* flush */
4084}
4085
James Smarte59058c2008-08-24 21:49:00 -04004086/**
James Smart3621a712009-04-06 18:47:14 -04004087 * lpfc_sli_brdkill - Issue a kill_board mailbox command
James Smarte59058c2008-08-24 21:49:00 -04004088 * @phba: Pointer to HBA context object.
4089 *
4090 * This function issues a kill_board mailbox command and waits for
4091 * the error attention interrupt. This function is called for stopping
4092 * the firmware processing. The caller is not required to hold any
4093 * locks. This function calls the lpfc_hba_down_post function to free
4094 * any pending commands after the kill. The function will return 1 when it
4095 * fails to kill the board, else it will return 0.
4096 **/
Jamie Wellnitz41415862006-02-28 19:25:27 -05004097int
James Smart2e0fef82007-06-17 19:56:36 -05004098lpfc_sli_brdkill(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05004099{
Jamie Wellnitz41415862006-02-28 19:25:27 -05004100 struct lpfc_sli *psli;
4101 LPFC_MBOXQ_t *pmb;
4102 uint32_t status;
4103 uint32_t ha_copy;
4104 int retval;
4105 int i = 0;
4106
4107 psli = &phba->sli;
4108
4109 /* Kill HBA */
James Smarted957682007-06-17 19:56:37 -05004110 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04004111 "0329 Kill HBA Data: x%x x%x\n",
4112 phba->pport->port_state, psli->sli_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004113
James Smart98c9ea52007-10-27 13:37:33 -04004114 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4115 if (!pmb)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004116 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004117
4118 /* Disable the error attention */
James Smart2e0fef82007-06-17 19:56:36 -05004119 spin_lock_irq(&phba->hbalock);
James Smart9940b972011-03-11 16:06:12 -05004120 if (lpfc_readl(phba->HCregaddr, &status)) {
4121 spin_unlock_irq(&phba->hbalock);
4122 mempool_free(pmb, phba->mbox_mem_pool);
4123 return 1;
4124 }
Jamie Wellnitz41415862006-02-28 19:25:27 -05004125 status &= ~HC_ERINT_ENA;
4126 writel(status, phba->HCregaddr);
4127 readl(phba->HCregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05004128 phba->link_flag |= LS_IGNORE_ERATT;
4129 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004130
4131 lpfc_kill_board(phba, pmb);
4132 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4133 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4134
4135 if (retval != MBX_SUCCESS) {
4136 if (retval != MBX_BUSY)
4137 mempool_free(pmb, phba->mbox_mem_pool);
James Smarte40a02c2010-02-26 14:13:54 -05004138 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4139 "2752 KILL_BOARD command failed retval %d\n",
4140 retval);
James Smart2e0fef82007-06-17 19:56:36 -05004141 spin_lock_irq(&phba->hbalock);
4142 phba->link_flag &= ~LS_IGNORE_ERATT;
4143 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004144 return 1;
4145 }
4146
James Smartf4b4c682009-05-22 14:53:12 -04004147 spin_lock_irq(&phba->hbalock);
4148 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4149 spin_unlock_irq(&phba->hbalock);
James Smart92908312006-03-07 15:04:13 -05004150
Jamie Wellnitz41415862006-02-28 19:25:27 -05004151 mempool_free(pmb, phba->mbox_mem_pool);
4152
4153 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4154 * attention every 100ms for 3 seconds. If we don't get ERATT after
4155 * 3 seconds we still set HBA_ERROR state because the status of the
4156 * board is now undefined.
4157 */
James Smart9940b972011-03-11 16:06:12 -05004158 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4159 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004160 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4161 mdelay(100);
James Smart9940b972011-03-11 16:06:12 -05004162 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4163 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004164 }
4165
4166 del_timer_sync(&psli->mbox_tmo);
James Smart92908312006-03-07 15:04:13 -05004167 if (ha_copy & HA_ERATT) {
4168 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05004169 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05004170 }
James Smart2e0fef82007-06-17 19:56:36 -05004171 spin_lock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004172 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart04c68492009-05-22 14:52:52 -04004173 psli->mbox_active = NULL;
James Smart2e0fef82007-06-17 19:56:36 -05004174 phba->link_flag &= ~LS_IGNORE_ERATT;
4175 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004176
Jamie Wellnitz41415862006-02-28 19:25:27 -05004177 lpfc_hba_down_post(phba);
James Smart2e0fef82007-06-17 19:56:36 -05004178 phba->link_state = LPFC_HBA_ERROR;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004179
James Smart2e0fef82007-06-17 19:56:36 -05004180 return ha_copy & HA_ERATT ? 0 : 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004181}
4182
James Smarte59058c2008-08-24 21:49:00 -04004183/**
James Smart3772a992009-05-22 14:50:54 -04004184 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
James Smarte59058c2008-08-24 21:49:00 -04004185 * @phba: Pointer to HBA context object.
4186 *
4187 * This function resets the HBA by writing HC_INITFF to the control
4188 * register. After the HBA resets, this function resets all the iocb ring
4189 * indices. This function disables PCI layer parity checking during
4190 * the reset.
4191 * This function returns 0 always.
4192 * The caller is not required to hold any locks.
4193 **/
Jamie Wellnitz41415862006-02-28 19:25:27 -05004194int
James Smart2e0fef82007-06-17 19:56:36 -05004195lpfc_sli_brdreset(struct lpfc_hba *phba)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004196{
4197 struct lpfc_sli *psli;
dea31012005-04-17 16:05:31 -05004198 struct lpfc_sli_ring *pring;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004199 uint16_t cfg_value;
dea31012005-04-17 16:05:31 -05004200 int i;
dea31012005-04-17 16:05:31 -05004201
Jamie Wellnitz41415862006-02-28 19:25:27 -05004202 psli = &phba->sli;
dea31012005-04-17 16:05:31 -05004203
Jamie Wellnitz41415862006-02-28 19:25:27 -05004204 /* Reset HBA */
4205 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04004206 "0325 Reset HBA Data: x%x x%x\n",
James Smart2e0fef82007-06-17 19:56:36 -05004207 phba->pport->port_state, psli->sli_flag);
dea31012005-04-17 16:05:31 -05004208
4209 /* perform board reset */
4210 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04004211 phba->link_events = 0;
James Smart2e0fef82007-06-17 19:56:36 -05004212 phba->pport->fc_myDID = 0;
4213 phba->pport->fc_prevDID = 0;
dea31012005-04-17 16:05:31 -05004214
Jamie Wellnitz41415862006-02-28 19:25:27 -05004215 /* Turn off parity checking and serr during the physical reset */
4216 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4217 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4218 (cfg_value &
4219 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4220
James Smart3772a992009-05-22 14:50:54 -04004221 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4222
Jamie Wellnitz41415862006-02-28 19:25:27 -05004223 /* Now toggle INITFF bit in the Host Control Register */
4224 writel(HC_INITFF, phba->HCregaddr);
4225 mdelay(1);
4226 readl(phba->HCregaddr); /* flush */
4227 writel(0, phba->HCregaddr);
4228 readl(phba->HCregaddr); /* flush */
4229
4230 /* Restore PCI cmd register */
4231 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea31012005-04-17 16:05:31 -05004232
4233 /* Initialize relevant SLI info */
Jamie Wellnitz41415862006-02-28 19:25:27 -05004234 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -08004235 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -05004236 pring->flag = 0;
James Smart7e56aa22012-08-03 12:35:34 -04004237 pring->sli.sli3.rspidx = 0;
4238 pring->sli.sli3.next_cmdidx = 0;
4239 pring->sli.sli3.local_getidx = 0;
4240 pring->sli.sli3.cmdidx = 0;
dea31012005-04-17 16:05:31 -05004241 pring->missbufcnt = 0;
4242 }
dea31012005-04-17 16:05:31 -05004243
James Smart2e0fef82007-06-17 19:56:36 -05004244 phba->link_state = LPFC_WARM_START;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004245 return 0;
4246}
4247
James Smarte59058c2008-08-24 21:49:00 -04004248/**
James Smartda0436e2009-05-22 14:51:39 -04004249 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4250 * @phba: Pointer to HBA context object.
4251 *
4252 * This function resets a SLI4 HBA. This function disables PCI layer parity
4253 * checking while it resets the device. The caller is not required to hold
4254 * any locks.
4255 *
4256 * This function returns 0 always.
4257 **/
4258int
4259lpfc_sli4_brdreset(struct lpfc_hba *phba)
4260{
4261 struct lpfc_sli *psli = &phba->sli;
4262 uint16_t cfg_value;
James Smart02936352014-04-04 13:52:12 -04004263 int rc = 0;
James Smartda0436e2009-05-22 14:51:39 -04004264
4265 /* Reset HBA */
4266 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart02936352014-04-04 13:52:12 -04004267 "0295 Reset HBA Data: x%x x%x x%x\n",
4268 phba->pport->port_state, psli->sli_flag,
4269 phba->hba_flag);
James Smartda0436e2009-05-22 14:51:39 -04004270
4271 /* perform board reset */
4272 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04004273 phba->link_events = 0;
James Smartda0436e2009-05-22 14:51:39 -04004274 phba->pport->fc_myDID = 0;
4275 phba->pport->fc_prevDID = 0;
4276
James Smartda0436e2009-05-22 14:51:39 -04004277 spin_lock_irq(&phba->hbalock);
4278 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4279 phba->fcf.fcf_flag = 0;
James Smartda0436e2009-05-22 14:51:39 -04004280 spin_unlock_irq(&phba->hbalock);
4281
James Smart02936352014-04-04 13:52:12 -04004282 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4283 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4284 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4285 return rc;
4286 }
4287
James Smartda0436e2009-05-22 14:51:39 -04004288 /* Now physically reset the device */
4289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4290 "0389 Performing PCI function reset!\n");
James Smartbe858b62010-12-15 17:57:20 -05004291
4292 /* Turn off parity checking and serr during the physical reset */
4293 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4294 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4295 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4296
James Smart88318812012-09-29 11:29:29 -04004297 /* Perform FCoE PCI function reset before freeing queue memory */
James Smart27b01b82012-05-09 21:19:44 -04004298 rc = lpfc_pci_function_reset(phba);
James Smart88318812012-09-29 11:29:29 -04004299 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -04004300
James Smartbe858b62010-12-15 17:57:20 -05004301 /* Restore PCI cmd register */
4302 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4303
James Smart27b01b82012-05-09 21:19:44 -04004304 return rc;
James Smartda0436e2009-05-22 14:51:39 -04004305}
4306
4307/**
4308 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
James Smarte59058c2008-08-24 21:49:00 -04004309 * @phba: Pointer to HBA context object.
4310 *
4311 * This function is called in the SLI initialization code path to
4312 * restart the HBA. The caller is not required to hold any lock.
4313 * This function writes MBX_RESTART mailbox command to the SLIM and
4314 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4315 * function to free any pending commands. The function enables
4316 * POST only during the first initialization. The function returns zero.
4317 * The function does not guarantee completion of MBX_RESTART mailbox
4318 * command before the return of this function.
4319 **/
James Smartda0436e2009-05-22 14:51:39 -04004320static int
4321lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004322{
4323 MAILBOX_t *mb;
4324 struct lpfc_sli *psli;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004325 volatile uint32_t word0;
4326 void __iomem *to_slim;
James Smart0d878412009-10-02 15:16:56 -04004327 uint32_t hba_aer_enabled;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004328
James Smart2e0fef82007-06-17 19:56:36 -05004329 spin_lock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004330
James Smart0d878412009-10-02 15:16:56 -04004331 /* Take PCIe device Advanced Error Reporting (AER) state */
4332 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4333
Jamie Wellnitz41415862006-02-28 19:25:27 -05004334 psli = &phba->sli;
4335
4336 /* Restart HBA */
4337 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04004338 "0337 Restart HBA Data: x%x x%x\n",
James Smart2e0fef82007-06-17 19:56:36 -05004339 phba->pport->port_state, psli->sli_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004340
4341 word0 = 0;
4342 mb = (MAILBOX_t *) &word0;
4343 mb->mbxCommand = MBX_RESTART;
4344 mb->mbxHc = 1;
4345
James Smart92908312006-03-07 15:04:13 -05004346 lpfc_reset_barrier(phba);
4347
Jamie Wellnitz41415862006-02-28 19:25:27 -05004348 to_slim = phba->MBslimaddr;
4349 writel(*(uint32_t *) mb, to_slim);
4350 readl(to_slim); /* flush */
4351
4352 /* Only skip post after fc_ffinit is completed */
James Smarteaf15d52008-12-04 22:39:29 -05004353 if (phba->pport->port_state)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004354 word0 = 1; /* This is really setting up word1 */
James Smarteaf15d52008-12-04 22:39:29 -05004355 else
Jamie Wellnitz41415862006-02-28 19:25:27 -05004356 word0 = 0; /* This is really setting up word1 */
James Smart65a29c12006-07-06 15:50:50 -04004357 to_slim = phba->MBslimaddr + sizeof (uint32_t);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004358 writel(*(uint32_t *) mb, to_slim);
4359 readl(to_slim); /* flush */
4360
4361 lpfc_sli_brdreset(phba);
James Smart2e0fef82007-06-17 19:56:36 -05004362 phba->pport->stopped = 0;
4363 phba->link_state = LPFC_INIT_START;
James Smartda0436e2009-05-22 14:51:39 -04004364 phba->hba_flag = 0;
James Smart2e0fef82007-06-17 19:56:36 -05004365 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004366
James Smart64ba8812006-08-02 15:24:34 -04004367 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4368 psli->stats_start = get_seconds();
4369
James Smarteaf15d52008-12-04 22:39:29 -05004370 /* Give the INITFF and Post time to settle. */
4371 mdelay(100);
dea31012005-04-17 16:05:31 -05004372
James Smart0d878412009-10-02 15:16:56 -04004373 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4374 if (hba_aer_enabled)
4375 pci_disable_pcie_error_reporting(phba->pcidev);
4376
Jamie Wellnitz41415862006-02-28 19:25:27 -05004377 lpfc_hba_down_post(phba);
dea31012005-04-17 16:05:31 -05004378
4379 return 0;
4380}
4381
James Smarte59058c2008-08-24 21:49:00 -04004382/**
James Smartda0436e2009-05-22 14:51:39 -04004383 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4384 * @phba: Pointer to HBA context object.
4385 *
4386 * This function is called in the SLI initialization code path to restart
4387 * a SLI4 HBA. The caller is not required to hold any lock.
4388 * At the end of the function, it calls lpfc_hba_down_post function to
4389 * free any pending commands.
4390 **/
4391static int
4392lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4393{
4394 struct lpfc_sli *psli = &phba->sli;
James Smart75baf692010-06-08 18:31:21 -04004395 uint32_t hba_aer_enabled;
James Smart27b01b82012-05-09 21:19:44 -04004396 int rc;
James Smartda0436e2009-05-22 14:51:39 -04004397
4398 /* Restart HBA */
4399 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4400 "0296 Restart HBA Data: x%x x%x\n",
4401 phba->pport->port_state, psli->sli_flag);
4402
James Smart75baf692010-06-08 18:31:21 -04004403 /* Take PCIe device Advanced Error Reporting (AER) state */
4404 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4405
James Smart27b01b82012-05-09 21:19:44 -04004406 rc = lpfc_sli4_brdreset(phba);
James Smartda0436e2009-05-22 14:51:39 -04004407
4408 spin_lock_irq(&phba->hbalock);
4409 phba->pport->stopped = 0;
4410 phba->link_state = LPFC_INIT_START;
4411 phba->hba_flag = 0;
4412 spin_unlock_irq(&phba->hbalock);
4413
4414 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4415 psli->stats_start = get_seconds();
4416
James Smart75baf692010-06-08 18:31:21 -04004417 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4418 if (hba_aer_enabled)
4419 pci_disable_pcie_error_reporting(phba->pcidev);
4420
James Smartda0436e2009-05-22 14:51:39 -04004421 lpfc_hba_down_post(phba);
4422
James Smart27b01b82012-05-09 21:19:44 -04004423 return rc;
James Smartda0436e2009-05-22 14:51:39 -04004424}
4425
4426/**
4427 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4428 * @phba: Pointer to HBA context object.
4429 *
4430 * This routine wraps the actual SLI3 or SLI4 hba restart routine via the
4431 * API jump table function pointer in the lpfc_hba struct.
4432**/
4433int
4434lpfc_sli_brdrestart(struct lpfc_hba *phba)
4435{
4436 return phba->lpfc_sli_brdrestart(phba);
4437}
4438
4439/**
James Smart3621a712009-04-06 18:47:14 -04004440 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
James Smarte59058c2008-08-24 21:49:00 -04004441 * @phba: Pointer to HBA context object.
4442 *
4443 * This function is called after an HBA restart to wait for successful
4444 * restart of the HBA. Successful restart of the HBA is indicated by the
4445 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4446 * iterations, the function will restart the HBA again. The function returns
4447 * zero if the HBA successfully restarted, else it returns a negative error code.
4448 **/
dea31012005-04-17 16:05:31 -05004449static int
4450lpfc_sli_chipset_init(struct lpfc_hba *phba)
4451{
4452 uint32_t status, i = 0;
4453
4454 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05004455 if (lpfc_readl(phba->HSregaddr, &status))
4456 return -EIO;
dea31012005-04-17 16:05:31 -05004457
4458 /* Check status register to see what current state is */
4459 i = 0;
4460 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4461
James Smartdcf2a4e2010-09-29 11:18:53 -04004462 /* Check every 10ms for 10 retries, then every 100ms for 90
4463 * retries, then every 1 sec for 50 retries for a total of
4464 * ~60 seconds before resetting the board again and checking every
4465 * 1 sec for 50 retries. Up to 60 seconds before the board is
4466 * ready may be required for the Falcon FIPS zeroization to
4467 * complete, and any board reset in between would restart the
4468 * zeroization, further delaying board readiness.
dea31012005-04-17 16:05:31 -05004469 */
James Smartdcf2a4e2010-09-29 11:18:53 -04004470 if (i++ >= 200) {
dea31012005-04-17 16:05:31 -05004471 /* Adapter failed to init, timeout, status reg
4472 <status> */
James Smarted957682007-06-17 19:56:37 -05004473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004474 "0436 Adapter failed to init, "
James Smart09372822008-01-11 01:52:54 -05004475 "timeout, status reg x%x, "
4476 "FW Data: A8 x%x AC x%x\n", status,
4477 readl(phba->MBslimaddr + 0xa8),
4478 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004479 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004480 return -ETIMEDOUT;
4481 }
4482
4483 /* Check to see if any errors occurred during init */
4484 if (status & HS_FFERM) {
4485 /* ERROR: During chipset initialization */
4486 /* Adapter failed to init, chipset, status reg
4487 <status> */
James Smarted957682007-06-17 19:56:37 -05004488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004489 "0437 Adapter failed to init, "
James Smart09372822008-01-11 01:52:54 -05004490 "chipset, status reg x%x, "
4491 "FW Data: A8 x%x AC x%x\n", status,
4492 readl(phba->MBslimaddr + 0xa8),
4493 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004494 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004495 return -EIO;
4496 }
4497
James Smartdcf2a4e2010-09-29 11:18:53 -04004498 if (i <= 10)
dea31012005-04-17 16:05:31 -05004499 msleep(10);
James Smartdcf2a4e2010-09-29 11:18:53 -04004500 else if (i <= 100)
4501 msleep(100);
4502 else
4503 msleep(1000);
dea31012005-04-17 16:05:31 -05004504
James Smartdcf2a4e2010-09-29 11:18:53 -04004505 if (i == 150) {
4506 /* Do post */
James Smart92d7f7b2007-06-17 19:56:38 -05004507 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004508 lpfc_sli_brdrestart(phba);
dea31012005-04-17 16:05:31 -05004509 }
4510 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05004511 if (lpfc_readl(phba->HSregaddr, &status))
4512 return -EIO;
dea31012005-04-17 16:05:31 -05004513 }
4514
4515 /* Check to see if any errors occurred during init */
4516 if (status & HS_FFERM) {
4517 /* ERROR: During chipset initialization */
4518 /* Adapter failed to init, chipset, status reg <status> */
James Smarted957682007-06-17 19:56:37 -05004519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004520 "0438 Adapter failed to init, chipset, "
James Smart09372822008-01-11 01:52:54 -05004521 "status reg x%x, "
4522 "FW Data: A8 x%x AC x%x\n", status,
4523 readl(phba->MBslimaddr + 0xa8),
4524 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004525 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004526 return -EIO;
4527 }
4528
4529 /* Clear all interrupt enable conditions */
4530 writel(0, phba->HCregaddr);
4531 readl(phba->HCregaddr); /* flush */
4532
4533 /* setup host attn register */
4534 writel(0xffffffff, phba->HAregaddr);
4535 readl(phba->HAregaddr); /* flush */
4536 return 0;
4537}
4538
James Smarte59058c2008-08-24 21:49:00 -04004539/**
James Smart3621a712009-04-06 18:47:14 -04004540 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
James Smarte59058c2008-08-24 21:49:00 -04004541 *
4542 * This function calculates and returns the number of HBQs required to be
4543 * configured.
4544 **/
James Smart78b2d852007-08-02 11:10:21 -04004545int
James Smarted957682007-06-17 19:56:37 -05004546lpfc_sli_hbq_count(void)
4547{
James Smart92d7f7b2007-06-17 19:56:38 -05004548 return ARRAY_SIZE(lpfc_hbq_defs);
James Smarted957682007-06-17 19:56:37 -05004549}
4550
James Smarte59058c2008-08-24 21:49:00 -04004551/**
James Smart3621a712009-04-06 18:47:14 -04004552 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004553 *
4554 * This function adds the number of hbq entries in every HBQ to get
4555 * the total number of hbq entries required for the HBA and returns
4556 * the total count.
4557 **/
James Smarted957682007-06-17 19:56:37 -05004558static int
4559lpfc_sli_hbq_entry_count(void)
4560{
4561 int hbq_count = lpfc_sli_hbq_count();
4562 int count = 0;
4563 int i;
4564
4565 for (i = 0; i < hbq_count; ++i)
James Smart92d7f7b2007-06-17 19:56:38 -05004566 count += lpfc_hbq_defs[i]->entry_count;
James Smarted957682007-06-17 19:56:37 -05004567 return count;
4568}
4569
James Smarte59058c2008-08-24 21:49:00 -04004570/**
James Smart3621a712009-04-06 18:47:14 -04004571 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004572 *
4573 * This function calculates amount of memory required for all hbq entries
4574 * to be configured and returns the total memory required.
4575 **/
dea31012005-04-17 16:05:31 -05004576int
James Smarted957682007-06-17 19:56:37 -05004577lpfc_sli_hbq_size(void)
4578{
4579 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4580}
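
/*
 * Worked sizing sketch with assumed numbers (not taken from lpfc_hbq_defs):
 * if the profile table described a single HBQ of 256 entries, the memory
 * reserved for HBQ entries would be 256 * sizeof(struct lpfc_hbq_entry)
 * bytes, which is exactly what lpfc_sli_hbq_size() computes by summing the
 * entry_count of every profile.
 */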
4581
James Smarte59058c2008-08-24 21:49:00 -04004582/**
James Smart3621a712009-04-06 18:47:14 -04004583 * lpfc_sli_hbq_setup - configure and initialize HBQs
James Smarte59058c2008-08-24 21:49:00 -04004584 * @phba: Pointer to HBA context object.
4585 *
4586 * This function is called during the SLI initialization to configure
4587 * all the HBQs and post buffers to the HBQ. The caller is not
4588 * required to hold any locks. This function will return zero if successful,
4589 * else it will return a negative error code.
4590 **/
James Smarted957682007-06-17 19:56:37 -05004591static int
4592lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4593{
4594 int hbq_count = lpfc_sli_hbq_count();
4595 LPFC_MBOXQ_t *pmb;
4596 MAILBOX_t *pmbox;
4597 uint32_t hbqno;
4598 uint32_t hbq_entry_index;
James Smarted957682007-06-17 19:56:37 -05004599
James Smart92d7f7b2007-06-17 19:56:38 -05004600 /* Get a Mailbox buffer to setup mailbox
4601 * commands for HBA initialization
4602 */
James Smarted957682007-06-17 19:56:37 -05004603 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4604
4605 if (!pmb)
4606 return -ENOMEM;
4607
James Smart04c68492009-05-22 14:52:52 -04004608 pmbox = &pmb->u.mb;
James Smarted957682007-06-17 19:56:37 -05004609
4610 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4611 phba->link_state = LPFC_INIT_MBX_CMDS;
James Smart3163f722008-02-08 18:50:25 -05004612 phba->hbq_in_use = 1;
James Smarted957682007-06-17 19:56:37 -05004613
4614 hbq_entry_index = 0;
4615 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4616 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4617 phba->hbqs[hbqno].hbqPutIdx = 0;
4618 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4619 phba->hbqs[hbqno].entry_count =
James Smart92d7f7b2007-06-17 19:56:38 -05004620 lpfc_hbq_defs[hbqno]->entry_count;
James Smart51ef4c22007-08-02 11:10:31 -04004621 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4622 hbq_entry_index, pmb);
James Smarted957682007-06-17 19:56:37 -05004623 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4624
4625 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4626 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4627 mbxStatus <status>, ring <num> */
4628
4629 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05004630 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004631 "1805 Adapter failed to init. "
James Smarted957682007-06-17 19:56:37 -05004632 "Data: x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04004633 pmbox->mbxCommand,
James Smarted957682007-06-17 19:56:37 -05004634 pmbox->mbxStatus, hbqno);
4635
4636 phba->link_state = LPFC_HBA_ERROR;
4637 mempool_free(pmb, phba->mbox_mem_pool);
James Smart6e7288d2010-06-07 15:23:35 -04004638 return -ENXIO;
James Smarted957682007-06-17 19:56:37 -05004639 }
4640 }
4641 phba->hbq_count = hbq_count;
4642
James Smarted957682007-06-17 19:56:37 -05004643 mempool_free(pmb, phba->mbox_mem_pool);
4644
James Smart92d7f7b2007-06-17 19:56:38 -05004645 /* Initially populate or replenish the HBQs */
James Smartd7c255b2008-08-24 21:50:00 -04004646 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4647 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
James Smarted957682007-06-17 19:56:37 -05004648 return 0;
4649}
4650
James Smarte59058c2008-08-24 21:49:00 -04004651/**
James Smart4f774512009-05-22 14:52:35 -04004652 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4653 * @phba: Pointer to HBA context object.
4654 *
4655 * This function is called during the SLI initialization to configure
4656 * all the HBQs and post buffers to the HBQ. The caller is not
4657 * required to hold any locks. This function will return zero if successful,
4658 * else it will return a negative error code.
4659 **/
4660static int
4661lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4662{
4663 phba->hbq_in_use = 1;
James Smart895427b2017-02-12 13:52:30 -08004664 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4665 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
James Smart4f774512009-05-22 14:52:35 -04004666 phba->hbq_count = 1;
James Smart895427b2017-02-12 13:52:30 -08004667 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
James Smart4f774512009-05-22 14:52:35 -04004668 /* Initially populate or replenish the HBQs */
James Smart4f774512009-05-22 14:52:35 -04004669 return 0;
4670}
4671
4672/**
James Smart3621a712009-04-06 18:47:14 -04004673 * lpfc_sli_config_port - Issue config port mailbox command
James Smarte59058c2008-08-24 21:49:00 -04004674 * @phba: Pointer to HBA context object.
4675 * @sli_mode: sli mode - 2/3
4676 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08004677 * This function is called by the sli initialization code path
James Smarte59058c2008-08-24 21:49:00 -04004678 * to issue config_port mailbox command. This function restarts the
4679 * HBA firmware and issues a config_port mailbox command to configure
4680 * the SLI interface in the sli mode specified by sli_mode
4681 * variable. The caller is not required to hold any locks.
4682 * The function returns 0 if successful, else returns negative error
4683 * code.
4684 **/
James Smart93996272008-08-24 21:50:30 -04004685int
4686lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea31012005-04-17 16:05:31 -05004687{
4688 LPFC_MBOXQ_t *pmb;
4689 uint32_t resetcount = 0, rc = 0, done = 0;
4690
4691 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4692 if (!pmb) {
James Smart2e0fef82007-06-17 19:56:36 -05004693 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004694 return -ENOMEM;
4695 }
4696
James Smarted957682007-06-17 19:56:37 -05004697 phba->sli_rev = sli_mode;
dea31012005-04-17 16:05:31 -05004698 while (resetcount < 2 && !done) {
James Smart2e0fef82007-06-17 19:56:36 -05004699 spin_lock_irq(&phba->hbalock);
James Smart1c067a42006-08-01 07:33:52 -04004700 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004701 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05004702 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004703 lpfc_sli_brdrestart(phba);
dea31012005-04-17 16:05:31 -05004704 rc = lpfc_sli_chipset_init(phba);
4705 if (rc)
4706 break;
4707
James Smart2e0fef82007-06-17 19:56:36 -05004708 spin_lock_irq(&phba->hbalock);
James Smart1c067a42006-08-01 07:33:52 -04004709 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004710 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004711 resetcount++;
4712
James Smarted957682007-06-17 19:56:37 -05004713 /* Call pre CONFIG_PORT mailbox command initialization. A
4714 * value of 0 means the call was successful. Any other
4715 * nonzero value is a failure, but if ERESTART is returned,
4716 * the driver may reset the HBA and try again.
4717 */
dea31012005-04-17 16:05:31 -05004718 rc = lpfc_config_port_prep(phba);
4719 if (rc == -ERESTART) {
James Smarted957682007-06-17 19:56:37 -05004720 phba->link_state = LPFC_LINK_UNKNOWN;
dea31012005-04-17 16:05:31 -05004721 continue;
James Smart34b02dc2008-08-24 21:49:55 -04004722 } else if (rc)
dea31012005-04-17 16:05:31 -05004723 break;
James Smart6d368e52011-05-24 11:44:12 -04004724
James Smart2e0fef82007-06-17 19:56:36 -05004725 phba->link_state = LPFC_INIT_MBX_CMDS;
dea31012005-04-17 16:05:31 -05004726 lpfc_config_port(phba, pmb);
4727 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
James Smart34b02dc2008-08-24 21:49:55 -04004728 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4729 LPFC_SLI3_HBQ_ENABLED |
4730 LPFC_SLI3_CRP_ENABLED |
James Smartbc739052010-08-04 16:11:18 -04004731 LPFC_SLI3_BG_ENABLED |
4732 LPFC_SLI3_DSS_ENABLED);
James Smarted957682007-06-17 19:56:37 -05004733 if (rc != MBX_SUCCESS) {
dea31012005-04-17 16:05:31 -05004734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004735 "0442 Adapter failed to init, mbxCmd x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05004736 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
James Smart04c68492009-05-22 14:52:52 -04004737 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
James Smart2e0fef82007-06-17 19:56:36 -05004738 spin_lock_irq(&phba->hbalock);
James Smart04c68492009-05-22 14:52:52 -04004739 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004740 spin_unlock_irq(&phba->hbalock);
4741 rc = -ENXIO;
James Smart04c68492009-05-22 14:52:52 -04004742 } else {
4743 /* Allow asynchronous mailbox command to go through */
4744 spin_lock_irq(&phba->hbalock);
4745 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4746 spin_unlock_irq(&phba->hbalock);
James Smarted957682007-06-17 19:56:37 -05004747 done = 1;
James Smartcb69f7d2011-12-13 13:21:57 -05004748
4749 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4750 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4751 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4752 "3110 Port did not grant ASABT\n");
James Smart04c68492009-05-22 14:52:52 -04004753 }
dea31012005-04-17 16:05:31 -05004754 }
James Smarted957682007-06-17 19:56:37 -05004755 if (!done) {
4756 rc = -EINVAL;
4757 goto do_prep_failed;
4758 }
James Smart04c68492009-05-22 14:52:52 -04004759 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4760 if (!pmb->u.mb.un.varCfgPort.cMA) {
James Smart34b02dc2008-08-24 21:49:55 -04004761 rc = -ENXIO;
4762 goto do_prep_failed;
4763 }
James Smart04c68492009-05-22 14:52:52 -04004764 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
James Smart34b02dc2008-08-24 21:49:55 -04004765 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
James Smart04c68492009-05-22 14:52:52 -04004766 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4767 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4768 phba->max_vpi : phba->max_vports;
4769
James Smart34b02dc2008-08-24 21:49:55 -04004770 } else
4771 phba->max_vpi = 0;
James Smartbc739052010-08-04 16:11:18 -04004772 phba->fips_level = 0;
4773 phba->fips_spec_rev = 0;
4774 if (pmb->u.mb.un.varCfgPort.gdss) {
James Smart04c68492009-05-22 14:52:52 -04004775 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
James Smartbc739052010-08-04 16:11:18 -04004776 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4777 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4778 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4779 "2850 Security Crypto Active. FIPS x%d "
4780 "(Spec Rev: x%d)",
4781 phba->fips_level, phba->fips_spec_rev);
4782 }
4783 if (pmb->u.mb.un.varCfgPort.sec_err) {
4784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4785 "2856 Config Port Security Crypto "
4786 "Error: x%x ",
4787 pmb->u.mb.un.varCfgPort.sec_err);
4788 }
James Smart04c68492009-05-22 14:52:52 -04004789 if (pmb->u.mb.un.varCfgPort.gerbm)
James Smart34b02dc2008-08-24 21:49:55 -04004790 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
James Smart04c68492009-05-22 14:52:52 -04004791 if (pmb->u.mb.un.varCfgPort.gcrp)
James Smart34b02dc2008-08-24 21:49:55 -04004792 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
James Smart6e7288d2010-06-07 15:23:35 -04004793
4794 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4795 phba->port_gp = phba->mbox->us.s3_pgp.port;
James Smarte2a0a9d2008-12-04 22:40:02 -05004796
4797 if (phba->cfg_enable_bg) {
James Smart04c68492009-05-22 14:52:52 -04004798 if (pmb->u.mb.un.varCfgPort.gbg)
James Smarte2a0a9d2008-12-04 22:40:02 -05004799 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4800 else
4801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4802 "0443 Adapter did not grant "
4803 "BlockGuard\n");
4804 }
James Smart34b02dc2008-08-24 21:49:55 -04004805 } else {
James Smart8f34f4c2008-12-04 22:39:23 -05004806 phba->hbq_get = NULL;
James Smart34b02dc2008-08-24 21:49:55 -04004807 phba->port_gp = phba->mbox->us.s2.port;
James Smartd7c255b2008-08-24 21:50:00 -04004808 phba->max_vpi = 0;
James Smarted957682007-06-17 19:56:37 -05004809 }
James Smart92d7f7b2007-06-17 19:56:38 -05004810do_prep_failed:
James Smarted957682007-06-17 19:56:37 -05004811 mempool_free(pmb, phba->mbox_mem_pool);
4812 return rc;
4813}
4814
James Smarte59058c2008-08-24 21:49:00 -04004815
4816/**
Masahiro Yamada183b8022017-02-27 14:29:20 -08004817 * lpfc_sli_hba_setup - SLI initialization function
James Smarte59058c2008-08-24 21:49:00 -04004818 * @phba: Pointer to HBA context object.
4819 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08004820 * This function is the main SLI initialization function. This function
4821 * is called by the HBA initialization code, HBA reset code and HBA
James Smarte59058c2008-08-24 21:49:00 -04004822 * error attention handler code. Caller is not required to hold any
4823 * locks. This function issues the config_port mailbox command to configure
4824 * the SLI and sets up the iocb rings and HBQ rings. In the end the function
4825 * calls the config_port_post function to issue the init_link mailbox
4826 * command and to start the discovery. The function will return zero
4827 * if successful, else it will return a negative error code.
4828 **/
James Smarted957682007-06-17 19:56:37 -05004829int
4830lpfc_sli_hba_setup(struct lpfc_hba *phba)
4831{
4832 uint32_t rc;
James Smart6d368e52011-05-24 11:44:12 -04004833 int mode = 3, i;
4834 int longs;
James Smarted957682007-06-17 19:56:37 -05004835
James Smart12247e82016-07-06 12:36:09 -07004836 switch (phba->cfg_sli_mode) {
James Smarted957682007-06-17 19:56:37 -05004837 case 2:
James Smart78b2d852007-08-02 11:10:21 -04004838 if (phba->cfg_enable_npiv) {
James Smart92d7f7b2007-06-17 19:56:38 -05004839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smart12247e82016-07-06 12:36:09 -07004840 "1824 NPIV enabled: Override sli_mode "
James Smart92d7f7b2007-06-17 19:56:38 -05004841 "parameter (%d) to auto (0).\n",
James Smart12247e82016-07-06 12:36:09 -07004842 phba->cfg_sli_mode);
James Smart92d7f7b2007-06-17 19:56:38 -05004843 break;
4844 }
James Smarted957682007-06-17 19:56:37 -05004845 mode = 2;
4846 break;
4847 case 0:
4848 case 3:
4849 break;
4850 default:
James Smart92d7f7b2007-06-17 19:56:38 -05004851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smart12247e82016-07-06 12:36:09 -07004852 "1819 Unrecognized sli_mode parameter: %d.\n",
4853 phba->cfg_sli_mode);
James Smarted957682007-06-17 19:56:37 -05004854
4855 break;
4856 }
James Smartb5c53952016-03-31 14:12:30 -07004857 phba->fcp_embed_io = 0; /* SLI4 FC support only */
James Smarted957682007-06-17 19:56:37 -05004858
James Smart93996272008-08-24 21:50:30 -04004859 rc = lpfc_sli_config_port(phba, mode);
4860
James Smart12247e82016-07-06 12:36:09 -07004861 if (rc && phba->cfg_sli_mode == 3)
James Smart92d7f7b2007-06-17 19:56:38 -05004862 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004863 "1820 Unable to select SLI-3. "
4864 "Not supported by adapter.\n");
James Smarted957682007-06-17 19:56:37 -05004865 if (rc && mode != 2)
James Smart93996272008-08-24 21:50:30 -04004866 rc = lpfc_sli_config_port(phba, 2);
James Smart4597663f2016-07-06 12:36:01 -07004867 else if (rc && mode == 2)
4868 rc = lpfc_sli_config_port(phba, 3);
James Smarted957682007-06-17 19:56:37 -05004869 if (rc)
dea31012005-04-17 16:05:31 -05004870 goto lpfc_sli_hba_setup_error;
4871
James Smart0d878412009-10-02 15:16:56 -04004872 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4873 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4874 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4875 if (!rc) {
4876 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4877 "2709 This device supports "
4878 "Advanced Error Reporting (AER)\n");
4879 spin_lock_irq(&phba->hbalock);
4880 phba->hba_flag |= HBA_AER_ENABLED;
4881 spin_unlock_irq(&phba->hbalock);
4882 } else {
4883 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4884 "2708 This device does not support "
James Smartb069d7e2013-05-31 17:04:36 -04004885 "Advanced Error Reporting (AER): %d\n",
4886 rc);
James Smart0d878412009-10-02 15:16:56 -04004887 phba->cfg_aer_support = 0;
4888 }
4889 }
4890
James Smarted957682007-06-17 19:56:37 -05004891 if (phba->sli_rev == 3) {
4892 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4893 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
James Smarted957682007-06-17 19:56:37 -05004894 } else {
4895 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4896 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
James Smart92d7f7b2007-06-17 19:56:38 -05004897 phba->sli3_options = 0;
James Smarted957682007-06-17 19:56:37 -05004898 }
4899
4900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004901 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4902 phba->sli_rev, phba->max_vpi);
James Smarted957682007-06-17 19:56:37 -05004903 rc = lpfc_sli_ring_map(phba);
dea31012005-04-17 16:05:31 -05004904
4905 if (rc)
4906 goto lpfc_sli_hba_setup_error;
4907
James Smart6d368e52011-05-24 11:44:12 -04004908 /* Initialize VPIs. */
4909 if (phba->sli_rev == LPFC_SLI_REV3) {
4910 /*
4911 * The VPI bitmask and physical ID array are allocated
4912 * and initialized once only - at driver load. A port
4913 * reset doesn't need to reinitialize this memory.
4914 */
4915 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4916 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4917 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4918 GFP_KERNEL);
4919 if (!phba->vpi_bmask) {
4920 rc = -ENOMEM;
4921 goto lpfc_sli_hba_setup_error;
4922 }
4923
4924 phba->vpi_ids = kzalloc(
4925 (phba->max_vpi+1) * sizeof(uint16_t),
4926 GFP_KERNEL);
4927 if (!phba->vpi_ids) {
4928 kfree(phba->vpi_bmask);
4929 rc = -ENOMEM;
4930 goto lpfc_sli_hba_setup_error;
4931 }
4932 for (i = 0; i < phba->max_vpi; i++)
4933 phba->vpi_ids[i] = i;
4934 }
4935 }
4936
James Smart93996272008-08-24 21:50:30 -04004937 /* Init HBQs */
James Smarted957682007-06-17 19:56:37 -05004938 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4939 rc = lpfc_sli_hbq_setup(phba);
4940 if (rc)
4941 goto lpfc_sli_hba_setup_error;
4942 }
James Smart04c68492009-05-22 14:52:52 -04004943 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004944 phba->sli.sli_flag |= LPFC_PROCESS_LA;
James Smart04c68492009-05-22 14:52:52 -04004945 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004946
4947 rc = lpfc_config_port_post(phba);
4948 if (rc)
4949 goto lpfc_sli_hba_setup_error;
4950
James Smarted957682007-06-17 19:56:37 -05004951 return rc;
4952
James Smart92d7f7b2007-06-17 19:56:38 -05004953lpfc_sli_hba_setup_error:
James Smart2e0fef82007-06-17 19:56:36 -05004954 phba->link_state = LPFC_HBA_ERROR;
James Smarte40a02c2010-02-26 14:13:54 -05004955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004956 "0445 Firmware initialization failed\n");
dea31012005-04-17 16:05:31 -05004957 return rc;
4958}
4959
James Smartda0436e2009-05-22 14:51:39 -04004960/**
4961 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4962 * @phba: Pointer to HBA context object.
 4963 *
 4964 * This function issues a dump mailbox command to read config region
 4965 * 23, parses the records in the region, and populates the driver
 4966 * data structures. The mailbox is allocated and freed internally.
4967 **/
4968static int
James Smartff78d8f2011-12-13 13:21:35 -05004969lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
James Smartda0436e2009-05-22 14:51:39 -04004970{
James Smartff78d8f2011-12-13 13:21:35 -05004971 LPFC_MBOXQ_t *mboxq;
James Smartda0436e2009-05-22 14:51:39 -04004972 struct lpfc_dmabuf *mp;
4973 struct lpfc_mqe *mqe;
4974 uint32_t data_length;
4975 int rc;
4976
4977 /* Program the default value of vlan_id and fc_map */
4978 phba->valid_vlan = 0;
4979 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4980 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4981 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4982
James Smartff78d8f2011-12-13 13:21:35 -05004983 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4984 if (!mboxq)
James Smartda0436e2009-05-22 14:51:39 -04004985 return -ENOMEM;
4986
James Smartff78d8f2011-12-13 13:21:35 -05004987 mqe = &mboxq->u.mqe;
4988 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4989 rc = -ENOMEM;
4990 goto out_free_mboxq;
4991 }
4992
James Smartda0436e2009-05-22 14:51:39 -04004993 mp = (struct lpfc_dmabuf *) mboxq->context1;
4994 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4995
4996 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4997 "(%d):2571 Mailbox cmd x%x Status x%x "
4998 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4999 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5000 "CQ: x%x x%x x%x x%x\n",
5001 mboxq->vport ? mboxq->vport->vpi : 0,
5002 bf_get(lpfc_mqe_command, mqe),
5003 bf_get(lpfc_mqe_status, mqe),
5004 mqe->un.mb_words[0], mqe->un.mb_words[1],
5005 mqe->un.mb_words[2], mqe->un.mb_words[3],
5006 mqe->un.mb_words[4], mqe->un.mb_words[5],
5007 mqe->un.mb_words[6], mqe->un.mb_words[7],
5008 mqe->un.mb_words[8], mqe->un.mb_words[9],
5009 mqe->un.mb_words[10], mqe->un.mb_words[11],
5010 mqe->un.mb_words[12], mqe->un.mb_words[13],
5011 mqe->un.mb_words[14], mqe->un.mb_words[15],
5012 mqe->un.mb_words[16], mqe->un.mb_words[50],
5013 mboxq->mcqe.word0,
5014 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5015 mboxq->mcqe.trailer);
5016
5017 if (rc) {
5018 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5019 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005020 rc = -EIO;
5021 goto out_free_mboxq;
James Smartda0436e2009-05-22 14:51:39 -04005022 }
5023 data_length = mqe->un.mb_words[5];
James Smarta0c87cb2009-07-19 10:01:10 -04005024 if (data_length > DMP_RGN23_SIZE) {
James Smartd11e31d2009-06-10 17:23:06 -04005025 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5026 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005027 rc = -EIO;
5028 goto out_free_mboxq;
James Smartd11e31d2009-06-10 17:23:06 -04005029 }
James Smartda0436e2009-05-22 14:51:39 -04005030
5031 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5032 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5033 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005034 rc = 0;
5035
5036out_free_mboxq:
5037 mempool_free(mboxq, phba->mbox_mem_pool);
5038 return rc;
James Smartda0436e2009-05-22 14:51:39 -04005039}
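/*
 * Editor's sketch (illustrative only, not driver code): the routine above
 * allocates and frees its own mailbox, so a caller only inspects the
 * return code. The log message number is a placeholder and the failure
 * handling shown is an assumption; the defaults programmed at the top of
 * the routine stay in effect when region 23 cannot be read.
 *
 *	if (lpfc_sli4_read_fcoe_params(phba))
 *		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 *				"xxxx Region 23 unavailable, using "
 *				"default FC map and vlan settings\n");
 */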
5040
5041/**
5042 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5043 * @phba: pointer to lpfc hba data structure.
5044 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5045 * @vpd: pointer to the memory to hold resulting port vpd data.
5046 * @vpd_size: On input, the number of bytes allocated to @vpd.
5047 * On output, the number of data bytes in @vpd.
5048 *
5049 * This routine executes a READ_REV SLI4 mailbox command. In
5050 * addition, this routine gets the port vpd data.
5051 *
5052 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02005053 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -04005054 * -ENOMEM - could not allocate memory.
James Smartda0436e2009-05-22 14:51:39 -04005055 **/
5056static int
5057lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5058 uint8_t *vpd, uint32_t *vpd_size)
5059{
5060 int rc = 0;
5061 uint32_t dma_size;
5062 struct lpfc_dmabuf *dmabuf;
5063 struct lpfc_mqe *mqe;
5064
5065 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5066 if (!dmabuf)
5067 return -ENOMEM;
5068
5069 /*
5070 * Get a DMA buffer for the vpd data resulting from the READ_REV
5071 * mailbox command.
5072 */
5073 dma_size = *vpd_size;
Joe Perches1aee3832014-09-03 12:56:12 -04005074 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5075 &dmabuf->phys, GFP_KERNEL);
James Smartda0436e2009-05-22 14:51:39 -04005076 if (!dmabuf->virt) {
5077 kfree(dmabuf);
5078 return -ENOMEM;
5079 }
James Smartda0436e2009-05-22 14:51:39 -04005080
5081 /*
5082 * The SLI4 implementation of READ_REV conflicts at word1,
5083 * bits 31:16 and SLI4 adds vpd functionality not present
5084 * in SLI3. This code corrects the conflicts.
5085 */
5086 lpfc_read_rev(phba, mboxq);
5087 mqe = &mboxq->u.mqe;
5088 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5089 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5090 mqe->un.read_rev.word1 &= 0x0000FFFF;
5091 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5092 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5093
5094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5095 if (rc) {
5096 dma_free_coherent(&phba->pcidev->dev, dma_size,
5097 dmabuf->virt, dmabuf->phys);
James Smartdef9c7a2009-12-21 17:02:28 -05005098 kfree(dmabuf);
James Smartda0436e2009-05-22 14:51:39 -04005099 return -EIO;
5100 }
5101
James Smartda0436e2009-05-22 14:51:39 -04005102 /*
5103 * The available vpd length cannot be bigger than the
5104 * DMA buffer passed to the port. Catch the less than
5105 * case and update the caller's size.
5106 */
5107 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5108 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5109
James Smartd7c47992010-06-08 18:31:54 -04005110 memcpy(vpd, dmabuf->virt, *vpd_size);
5111
James Smartda0436e2009-05-22 14:51:39 -04005112 dma_free_coherent(&phba->pcidev->dev, dma_size,
5113 dmabuf->virt, dmabuf->phys);
5114 kfree(dmabuf);
5115 return 0;
5116}
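/*
 * Editor's sketch (illustrative only, not driver code): vpd_size is an
 * in/out value - passed in as the allocation size, returned as the number
 * of valid bytes. The 2048-byte size is an arbitrary example and the
 * lpfc_parse_vpd() call simply mirrors how the driver consumes the data
 * during SLI4 setup.
 *
 *	uint32_t vpd_size = 2048;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		lpfc_parse_vpd(phba, vpd, vpd_size);
 *	kfree(vpd);
 */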
5117
5118/**
James Smartcd1c8302011-10-10 21:33:25 -04005119 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5120 * @phba: pointer to lpfc hba data structure.
5121 *
 5122 * This routine retrieves the SLI4 device physical port name that this
 5123 * PCI function is attached to.
5124 *
5125 * Return codes
Anatol Pomozov4907cb72012-09-01 10:31:09 -07005126 * 0 - successful
James Smartcd1c8302011-10-10 21:33:25 -04005127 * otherwise - failed to retrieve physical port name
5128 **/
5129static int
5130lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5131{
5132 LPFC_MBOXQ_t *mboxq;
James Smartcd1c8302011-10-10 21:33:25 -04005133 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5134 struct lpfc_controller_attribute *cntl_attr;
5135 struct lpfc_mbx_get_port_name *get_port_name;
5136 void *virtaddr = NULL;
5137 uint32_t alloclen, reqlen;
5138 uint32_t shdr_status, shdr_add_status;
5139 union lpfc_sli4_cfg_shdr *shdr;
5140 char cport_name = 0;
5141 int rc;
5142
5143 /* We assume nothing at this point */
5144 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5145 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5146
5147 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5148 if (!mboxq)
5149 return -ENOMEM;
James Smartcd1c8302011-10-10 21:33:25 -04005150 /* obtain link type and link number via READ_CONFIG */
James Smartff78d8f2011-12-13 13:21:35 -05005151 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5152 lpfc_sli4_read_config(phba);
5153 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5154 goto retrieve_ppname;
James Smartcd1c8302011-10-10 21:33:25 -04005155
5156 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5157 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5158 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5159 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5160 LPFC_SLI4_MBX_NEMBED);
5161 if (alloclen < reqlen) {
5162 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5163 "3084 Allocated DMA memory size (%d) is "
5164 "less than the requested DMA memory size "
5165 "(%d)\n", alloclen, reqlen);
5166 rc = -ENOMEM;
5167 goto out_free_mboxq;
5168 }
5169 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5170 virtaddr = mboxq->sge_array->addr[0];
5171 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5172 shdr = &mbx_cntl_attr->cfg_shdr;
5173 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5174 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5175 if (shdr_status || shdr_add_status || rc) {
5176 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5177 "3085 Mailbox x%x (x%x/x%x) failed, "
5178 "rc:x%x, status:x%x, add_status:x%x\n",
5179 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5180 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5181 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5182 rc, shdr_status, shdr_add_status);
5183 rc = -ENXIO;
5184 goto out_free_mboxq;
5185 }
5186 cntl_attr = &mbx_cntl_attr->cntl_attr;
5187 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5188 phba->sli4_hba.lnk_info.lnk_tp =
5189 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5190 phba->sli4_hba.lnk_info.lnk_no =
5191 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5192 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5193 "3086 lnk_type:%d, lnk_numb:%d\n",
5194 phba->sli4_hba.lnk_info.lnk_tp,
5195 phba->sli4_hba.lnk_info.lnk_no);
5196
5197retrieve_ppname:
5198 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5199 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5200 sizeof(struct lpfc_mbx_get_port_name) -
5201 sizeof(struct lpfc_sli4_cfg_mhdr),
5202 LPFC_SLI4_MBX_EMBED);
5203 get_port_name = &mboxq->u.mqe.un.get_port_name;
5204 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5205 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5206 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5207 phba->sli4_hba.lnk_info.lnk_tp);
5208 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5209 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5210 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5211 if (shdr_status || shdr_add_status || rc) {
5212 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5213 "3087 Mailbox x%x (x%x/x%x) failed: "
5214 "rc:x%x, status:x%x, add_status:x%x\n",
5215 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5216 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5217 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5218 rc, shdr_status, shdr_add_status);
5219 rc = -ENXIO;
5220 goto out_free_mboxq;
5221 }
5222 switch (phba->sli4_hba.lnk_info.lnk_no) {
5223 case LPFC_LINK_NUMBER_0:
5224 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5225 &get_port_name->u.response);
5226 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5227 break;
5228 case LPFC_LINK_NUMBER_1:
5229 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5230 &get_port_name->u.response);
5231 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5232 break;
5233 case LPFC_LINK_NUMBER_2:
5234 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5235 &get_port_name->u.response);
5236 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5237 break;
5238 case LPFC_LINK_NUMBER_3:
5239 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5240 &get_port_name->u.response);
5241 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5242 break;
5243 default:
5244 break;
5245 }
5246
5247 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5248 phba->Port[0] = cport_name;
5249 phba->Port[1] = '\0';
5250 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5251 "3091 SLI get port name: %s\n", phba->Port);
5252 }
5253
5254out_free_mboxq:
5255 if (rc != MBX_TIMEOUT) {
5256 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5257 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5258 else
5259 mempool_free(mboxq, phba->mbox_mem_pool);
5260 }
5261 return rc;
5262}
5263
5264/**
James Smartda0436e2009-05-22 14:51:39 -04005265 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5266 * @phba: pointer to lpfc hba data structure.
5267 *
5268 * This routine is called to explicitly arm the SLI4 device's completion and
5269 * event queues
5270 **/
5271static void
5272lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5273{
James Smart895427b2017-02-12 13:52:30 -08005274 int qidx;
James Smartda0436e2009-05-22 14:51:39 -04005275
5276 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5277 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
James Smart895427b2017-02-12 13:52:30 -08005278 if (phba->sli4_hba.nvmels_cq)
5279 lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
5280 LPFC_QUEUE_REARM);
5281
5282 if (phba->sli4_hba.fcp_cq)
5283 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5284 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
5285 LPFC_QUEUE_REARM);
5286
5287 if (phba->sli4_hba.nvme_cq)
5288 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5289 lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
5290 LPFC_QUEUE_REARM);
James Smart1ba981f2014-02-20 09:56:45 -05005291
James Smartf38fa0b2014-04-04 13:52:21 -04005292 if (phba->cfg_fof)
James Smart1ba981f2014-02-20 09:56:45 -05005293 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5294
James Smart895427b2017-02-12 13:52:30 -08005295 if (phba->sli4_hba.hba_eq)
5296 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5297 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
5298 LPFC_QUEUE_REARM);
James Smart1ba981f2014-02-20 09:56:45 -05005299
James Smart2d7dbc42017-02-12 13:52:35 -08005300 if (phba->nvmet_support) {
5301 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5302 lpfc_sli4_cq_release(
5303 phba->sli4_hba.nvmet_cqset[qidx],
5304 LPFC_QUEUE_REARM);
5305 }
James Smart2e90f4b2011-12-13 13:22:37 -05005306 }
James Smart1ba981f2014-02-20 09:56:45 -05005307
5308 if (phba->cfg_fof)
5309 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
James Smartda0436e2009-05-22 14:51:39 -04005310}
5311
5312/**
James Smart6d368e52011-05-24 11:44:12 -04005313 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5314 * @phba: Pointer to HBA context object.
5315 * @type: The resource extent type.
James Smartb76f2dc2011-07-22 18:37:42 -04005316 * @extnt_count: buffer to hold port available extent count.
5317 * @extnt_size: buffer to hold element count per extent.
James Smart6d368e52011-05-24 11:44:12 -04005318 *
James Smartb76f2dc2011-07-22 18:37:42 -04005319 * This function calls the port and retrieves the number of available
5320 * extents and their size for a particular extent type.
5321 *
5322 * Returns: 0 if successful. Nonzero otherwise.
James Smart6d368e52011-05-24 11:44:12 -04005323 **/
James Smartb76f2dc2011-07-22 18:37:42 -04005324int
James Smart6d368e52011-05-24 11:44:12 -04005325lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5326 uint16_t *extnt_count, uint16_t *extnt_size)
5327{
5328 int rc = 0;
5329 uint32_t length;
5330 uint32_t mbox_tmo;
5331 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5332 LPFC_MBOXQ_t *mbox;
5333
5334 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5335 if (!mbox)
5336 return -ENOMEM;
5337
5338 /* Find out how many extents are available for this resource type */
5339 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5340 sizeof(struct lpfc_sli4_cfg_mhdr));
5341 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5342 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5343 length, LPFC_SLI4_MBX_EMBED);
5344
5345 /* Send an extents count of 0 - the GET doesn't use it. */
5346 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5347 LPFC_SLI4_MBX_EMBED);
5348 if (unlikely(rc)) {
5349 rc = -EIO;
5350 goto err_exit;
5351 }
5352
5353 if (!phba->sli4_hba.intr_enable)
5354 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5355 else {
James Smarta183a152011-10-10 21:32:43 -04005356 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005357 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5358 }
5359 if (unlikely(rc)) {
5360 rc = -EIO;
5361 goto err_exit;
5362 }
5363
5364 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5365 if (bf_get(lpfc_mbox_hdr_status,
5366 &rsrc_info->header.cfg_shdr.response)) {
5367 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5368 "2930 Failed to get resource extents "
5369 "Status 0x%x Add'l Status 0x%x\n",
5370 bf_get(lpfc_mbox_hdr_status,
5371 &rsrc_info->header.cfg_shdr.response),
5372 bf_get(lpfc_mbox_hdr_add_status,
5373 &rsrc_info->header.cfg_shdr.response));
5374 rc = -EIO;
5375 goto err_exit;
5376 }
5377
5378 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5379 &rsrc_info->u.rsp);
5380 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5381 &rsrc_info->u.rsp);
James Smart8a9d2e82012-05-09 21:16:12 -04005382
5383 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5384 "3162 Retrieved extents type-%d from port: count:%d, "
5385 "size:%d\n", type, *extnt_count, *extnt_size);
5386
5387err_exit:
James Smart6d368e52011-05-24 11:44:12 -04005388 mempool_free(mbox, phba->mbox_mem_pool);
5389 return rc;
5390}
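/*
 * Editor's sketch (illustrative only, not driver code): querying the port
 * for one extent type. The XRI type and the total_xris variable are just
 * examples of how the two returned values combine.
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size))
 *		total_xris = ext_cnt * ext_size;
 */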
5391
5392/**
5393 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5394 * @phba: Pointer to HBA context object.
5395 * @type: The extent type to check.
5396 *
5397 * This function reads the current available extents from the port and checks
5398 * if the extent count or extent size has changed since the last access.
 5399 * Callers use this routine after a port reset to understand if there is
 5400 * an extent reprovisioning requirement.
5401 *
5402 * Returns:
5403 * -Error: error indicates problem.
5404 * 1: Extent count or size has changed.
5405 * 0: No changes.
5406 **/
5407static int
5408lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5409{
5410 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5411 uint16_t size_diff, rsrc_ext_size;
5412 int rc = 0;
5413 struct lpfc_rsrc_blks *rsrc_entry;
5414 struct list_head *rsrc_blk_list = NULL;
5415
5416 size_diff = 0;
5417 curr_ext_cnt = 0;
5418 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5419 &rsrc_ext_cnt,
5420 &rsrc_ext_size);
5421 if (unlikely(rc))
5422 return -EIO;
5423
5424 switch (type) {
5425 case LPFC_RSC_TYPE_FCOE_RPI:
5426 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5427 break;
5428 case LPFC_RSC_TYPE_FCOE_VPI:
5429 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5430 break;
5431 case LPFC_RSC_TYPE_FCOE_XRI:
5432 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5433 break;
5434 case LPFC_RSC_TYPE_FCOE_VFI:
5435 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5436 break;
5437 default:
5438 break;
5439 }
5440
5441 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5442 curr_ext_cnt++;
5443 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5444 size_diff++;
5445 }
5446
5447 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5448 rc = 1;
5449
5450 return rc;
5451}
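/*
 * Editor's sketch (illustrative only, not driver code): folding the
 * tri-state result above into a reprovisioning decision after a port
 * reset. The reprovision flag and error label are hypothetical names.
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *	if (rc < 0)
 *		goto query_failed;
 *	reprovision = (rc == 1);
 */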
5452
5453/**
 5454 * lpfc_sli4_cfg_post_extnts - Post a resource-extent allocation request
 5455 * @phba: Pointer to HBA context object.
 5456 * @extnt_cnt: number of available extents.
 5457 * @type: the extent type (rpi, xri, vfi, vpi).
 5458 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 5459 * @mbox: pointer to the caller's allocated mailbox structure.
5460 *
 5461 * This function builds and issues the extents allocation request. It
 5462 * also sizes the request, choosing an embedded or non-embedded mailbox
 5463 * based on the memory needed to carry the allocated extents. It is the
 5464 * caller's responsibility to evaluate the response.
5465 *
5466 * Returns:
5467 * -Error: Error value describes the condition found.
5468 * 0: if successful
5469 **/
5470static int
James Smart8a9d2e82012-05-09 21:16:12 -04005471lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
James Smart6d368e52011-05-24 11:44:12 -04005472 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5473{
5474 int rc = 0;
5475 uint32_t req_len;
5476 uint32_t emb_len;
5477 uint32_t alloc_len, mbox_tmo;
5478
5479 /* Calculate the total requested length of the dma memory */
James Smart8a9d2e82012-05-09 21:16:12 -04005480 req_len = extnt_cnt * sizeof(uint16_t);
James Smart6d368e52011-05-24 11:44:12 -04005481
5482 /*
5483 * Calculate the size of an embedded mailbox. The uint32_t
5484 * accounts for extents-specific word.
5485 */
5486 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5487 sizeof(uint32_t);
5488
5489 /*
5490 * Presume the allocation and response will fit into an embedded
5491 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5492 */
5493 *emb = LPFC_SLI4_MBX_EMBED;
5494 if (req_len > emb_len) {
James Smart8a9d2e82012-05-09 21:16:12 -04005495 req_len = extnt_cnt * sizeof(uint16_t) +
James Smart6d368e52011-05-24 11:44:12 -04005496 sizeof(union lpfc_sli4_cfg_shdr) +
5497 sizeof(uint32_t);
5498 *emb = LPFC_SLI4_MBX_NEMBED;
5499 }
5500
5501 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5502 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5503 req_len, *emb);
5504 if (alloc_len < req_len) {
5505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartb76f2dc2011-07-22 18:37:42 -04005506 "2982 Allocated DMA memory size (x%x) is "
James Smart6d368e52011-05-24 11:44:12 -04005507 "less than the requested DMA memory "
5508 "size (x%x)\n", alloc_len, req_len);
5509 return -ENOMEM;
5510 }
James Smart8a9d2e82012-05-09 21:16:12 -04005511 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
James Smart6d368e52011-05-24 11:44:12 -04005512 if (unlikely(rc))
5513 return -EIO;
5514
5515 if (!phba->sli4_hba.intr_enable)
5516 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5517 else {
James Smarta183a152011-10-10 21:32:43 -04005518 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005519 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5520 }
5521
5522 if (unlikely(rc))
5523 rc = -EIO;
5524 return rc;
5525}
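/*
 * Editor's note (illustrative arithmetic, not driver code): the embedded
 * capacity above is emb_len = sizeof(MAILBOX_t) - sizeof(struct
 * mbox_header) - sizeof(uint32_t), while the request starts at
 * extnt_cnt * sizeof(uint16_t). For example, a request for 64 extents
 * needs only 64 * 2 = 128 bytes of id space; whether that fits the
 * embedded form depends on the structure sizes for the build, and only
 * when it does not is the non-embedded header overhead added:
 *
 *	req_len = extnt_cnt * sizeof(uint16_t) +
 *		  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 */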
5526
5527/**
5528 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5529 * @phba: Pointer to HBA context object.
5530 * @type: The resource extent type to allocate.
5531 *
5532 * This function allocates the number of elements for the specified
5533 * resource type.
5534 **/
5535static int
5536lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5537{
5538 bool emb = false;
5539 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5540 uint16_t rsrc_id, rsrc_start, j, k;
5541 uint16_t *ids;
5542 int i, rc;
5543 unsigned long longs;
5544 unsigned long *bmask;
5545 struct lpfc_rsrc_blks *rsrc_blks;
5546 LPFC_MBOXQ_t *mbox;
5547 uint32_t length;
5548 struct lpfc_id_range *id_array = NULL;
5549 void *virtaddr = NULL;
5550 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5551 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5552 struct list_head *ext_blk_list;
5553
5554 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5555 &rsrc_cnt,
5556 &rsrc_size);
5557 if (unlikely(rc))
5558 return -EIO;
5559
5560 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5561 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5562 "3009 No available Resource Extents "
5563 "for resource type 0x%x: Count: 0x%x, "
5564 "Size 0x%x\n", type, rsrc_cnt,
5565 rsrc_size);
5566 return -ENOMEM;
5567 }
5568
James Smart8a9d2e82012-05-09 21:16:12 -04005569 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5570 "2903 Post resource extents type-0x%x: "
5571 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
James Smart6d368e52011-05-24 11:44:12 -04005572
5573 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5574 if (!mbox)
5575 return -ENOMEM;
5576
James Smart8a9d2e82012-05-09 21:16:12 -04005577 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005578 if (unlikely(rc)) {
5579 rc = -EIO;
5580 goto err_exit;
5581 }
5582
5583 /*
5584 * Figure out where the response is located. Then get local pointers
 5585 * to the response data. The port is not guaranteed to honor the full
 5586 * extent count requested, so update the local variable with the
 5587 * count actually allocated by the port.
5588 */
5589 if (emb == LPFC_SLI4_MBX_EMBED) {
5590 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5591 id_array = &rsrc_ext->u.rsp.id[0];
5592 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5593 } else {
5594 virtaddr = mbox->sge_array->addr[0];
5595 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5596 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5597 id_array = &n_rsrc->id;
5598 }
5599
5600 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5601 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5602
5603 /*
5604 * Based on the resource size and count, correct the base and max
5605 * resource values.
5606 */
5607 length = sizeof(struct lpfc_rsrc_blks);
5608 switch (type) {
5609 case LPFC_RSC_TYPE_FCOE_RPI:
5610 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5611 sizeof(unsigned long),
5612 GFP_KERNEL);
5613 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5614 rc = -ENOMEM;
5615 goto err_exit;
5616 }
5617 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5618 sizeof(uint16_t),
5619 GFP_KERNEL);
5620 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5621 kfree(phba->sli4_hba.rpi_bmask);
5622 rc = -ENOMEM;
5623 goto err_exit;
5624 }
5625
5626 /*
5627 * The next_rpi was initialized with the maximum available
5628 * count but the port may allocate a smaller number. Catch
5629 * that case and update the next_rpi.
5630 */
5631 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5632
5633 /* Initialize local ptrs for common extent processing later. */
5634 bmask = phba->sli4_hba.rpi_bmask;
5635 ids = phba->sli4_hba.rpi_ids;
5636 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5637 break;
5638 case LPFC_RSC_TYPE_FCOE_VPI:
5639 phba->vpi_bmask = kzalloc(longs *
5640 sizeof(unsigned long),
5641 GFP_KERNEL);
5642 if (unlikely(!phba->vpi_bmask)) {
5643 rc = -ENOMEM;
5644 goto err_exit;
5645 }
5646 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5647 sizeof(uint16_t),
5648 GFP_KERNEL);
5649 if (unlikely(!phba->vpi_ids)) {
5650 kfree(phba->vpi_bmask);
5651 rc = -ENOMEM;
5652 goto err_exit;
5653 }
5654
5655 /* Initialize local ptrs for common extent processing later. */
5656 bmask = phba->vpi_bmask;
5657 ids = phba->vpi_ids;
5658 ext_blk_list = &phba->lpfc_vpi_blk_list;
5659 break;
5660 case LPFC_RSC_TYPE_FCOE_XRI:
5661 phba->sli4_hba.xri_bmask = kzalloc(longs *
5662 sizeof(unsigned long),
5663 GFP_KERNEL);
5664 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5665 rc = -ENOMEM;
5666 goto err_exit;
5667 }
James Smart8a9d2e82012-05-09 21:16:12 -04005668 phba->sli4_hba.max_cfg_param.xri_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04005669 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5670 sizeof(uint16_t),
5671 GFP_KERNEL);
5672 if (unlikely(!phba->sli4_hba.xri_ids)) {
5673 kfree(phba->sli4_hba.xri_bmask);
5674 rc = -ENOMEM;
5675 goto err_exit;
5676 }
5677
5678 /* Initialize local ptrs for common extent processing later. */
5679 bmask = phba->sli4_hba.xri_bmask;
5680 ids = phba->sli4_hba.xri_ids;
5681 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5682 break;
5683 case LPFC_RSC_TYPE_FCOE_VFI:
5684 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5685 sizeof(unsigned long),
5686 GFP_KERNEL);
5687 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5688 rc = -ENOMEM;
5689 goto err_exit;
5690 }
5691 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5692 sizeof(uint16_t),
5693 GFP_KERNEL);
5694 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5695 kfree(phba->sli4_hba.vfi_bmask);
5696 rc = -ENOMEM;
5697 goto err_exit;
5698 }
5699
5700 /* Initialize local ptrs for common extent processing later. */
5701 bmask = phba->sli4_hba.vfi_bmask;
5702 ids = phba->sli4_hba.vfi_ids;
5703 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5704 break;
5705 default:
5706 /* Unsupported Opcode. Fail call. */
5707 id_array = NULL;
5708 bmask = NULL;
5709 ids = NULL;
5710 ext_blk_list = NULL;
5711 goto err_exit;
5712 }
5713
5714 /*
5715 * Complete initializing the extent configuration with the
5716 * allocated ids assigned to this function. The bitmask serves
5717 * as an index into the array and manages the available ids. The
5718 * array just stores the ids communicated to the port via the wqes.
5719 */
5720 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5721 if ((i % 2) == 0)
5722 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5723 &id_array[k]);
5724 else
5725 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5726 &id_array[k]);
5727
5728 rsrc_blks = kzalloc(length, GFP_KERNEL);
5729 if (unlikely(!rsrc_blks)) {
5730 rc = -ENOMEM;
5731 kfree(bmask);
5732 kfree(ids);
5733 goto err_exit;
5734 }
5735 rsrc_blks->rsrc_start = rsrc_id;
5736 rsrc_blks->rsrc_size = rsrc_size;
5737 list_add_tail(&rsrc_blks->list, ext_blk_list);
5738 rsrc_start = rsrc_id;
James Smart895427b2017-02-12 13:52:30 -08005739 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
James Smart6d368e52011-05-24 11:44:12 -04005740 phba->sli4_hba.scsi_xri_start = rsrc_start +
James Smart895427b2017-02-12 13:52:30 -08005741 lpfc_sli4_get_iocb_cnt(phba);
5742 phba->sli4_hba.nvme_xri_start =
5743 phba->sli4_hba.scsi_xri_start +
5744 phba->sli4_hba.scsi_xri_max;
5745 }
James Smart6d368e52011-05-24 11:44:12 -04005746
5747 while (rsrc_id < (rsrc_start + rsrc_size)) {
5748 ids[j] = rsrc_id;
5749 rsrc_id++;
5750 j++;
5751 }
5752 /* Entire word processed. Get next word.*/
5753 if ((i % 2) == 1)
5754 k++;
5755 }
5756 err_exit:
5757 lpfc_sli4_mbox_cmd_free(phba, mbox);
5758 return rc;
5759}
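/*
 * Editor's note (illustrative, not driver code): the extent response
 * packs two 16-bit base ids per 32-bit word, which is why the loop above
 * alternates word4_0/word4_1 and advances k only on odd i. With
 * rsrc_cnt = 3, for example, the ids are picked up as:
 *
 *	i = 0: id_array[0], low half  (lpfc_mbx_rsrc_id_word4_0)
 *	i = 1: id_array[0], high half (lpfc_mbx_rsrc_id_word4_1), k -> 1
 *	i = 2: id_array[1], low half  (lpfc_mbx_rsrc_id_word4_0)
 */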
5760
James Smart6d368e52011-05-24 11:44:12 -04005763/**
5764 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5765 * @phba: Pointer to HBA context object.
5766 * @type: the extent's type.
5767 *
5768 * This function deallocates all extents of a particular resource type.
5769 * SLI4 does not allow for deallocating a particular extent range. It
5770 * is the caller's responsibility to release all kernel memory resources.
5771 **/
5772static int
5773lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5774{
5775 int rc;
5776 uint32_t length, mbox_tmo = 0;
5777 LPFC_MBOXQ_t *mbox;
5778 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5779 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5780
5781 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5782 if (!mbox)
5783 return -ENOMEM;
5784
5785 /*
 5786 * This function sends an embedded mailbox because it only sends
 5787 * the resource type. All extents of this type are released by the
5788 * port.
5789 */
5790 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5791 sizeof(struct lpfc_sli4_cfg_mhdr));
5792 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5793 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5794 length, LPFC_SLI4_MBX_EMBED);
5795
5796 /* Send an extents count of 0 - the dealloc doesn't use it. */
5797 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5798 LPFC_SLI4_MBX_EMBED);
5799 if (unlikely(rc)) {
5800 rc = -EIO;
5801 goto out_free_mbox;
5802 }
5803 if (!phba->sli4_hba.intr_enable)
5804 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5805 else {
James Smarta183a152011-10-10 21:32:43 -04005806 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005807 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5808 }
5809 if (unlikely(rc)) {
5810 rc = -EIO;
5811 goto out_free_mbox;
5812 }
5813
5814 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5815 if (bf_get(lpfc_mbox_hdr_status,
5816 &dealloc_rsrc->header.cfg_shdr.response)) {
5817 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5818 "2919 Failed to release resource extents "
5819 "for type %d - Status 0x%x Add'l Status 0x%x. "
5820 "Resource memory not released.\n",
5821 type,
5822 bf_get(lpfc_mbox_hdr_status,
5823 &dealloc_rsrc->header.cfg_shdr.response),
5824 bf_get(lpfc_mbox_hdr_add_status,
5825 &dealloc_rsrc->header.cfg_shdr.response));
5826 rc = -EIO;
5827 goto out_free_mbox;
5828 }
5829
5830 /* Release kernel memory resources for the specific type. */
5831 switch (type) {
5832 case LPFC_RSC_TYPE_FCOE_VPI:
5833 kfree(phba->vpi_bmask);
5834 kfree(phba->vpi_ids);
5835 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5836 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5837 &phba->lpfc_vpi_blk_list, list) {
5838 list_del_init(&rsrc_blk->list);
5839 kfree(rsrc_blk);
5840 }
James Smart16a3a202013-04-17 20:14:38 -04005841 phba->sli4_hba.max_cfg_param.vpi_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04005842 break;
5843 case LPFC_RSC_TYPE_FCOE_XRI:
5844 kfree(phba->sli4_hba.xri_bmask);
5845 kfree(phba->sli4_hba.xri_ids);
James Smart6d368e52011-05-24 11:44:12 -04005846 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5847 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5848 list_del_init(&rsrc_blk->list);
5849 kfree(rsrc_blk);
5850 }
5851 break;
5852 case LPFC_RSC_TYPE_FCOE_VFI:
5853 kfree(phba->sli4_hba.vfi_bmask);
5854 kfree(phba->sli4_hba.vfi_ids);
5855 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5856 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5857 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5858 list_del_init(&rsrc_blk->list);
5859 kfree(rsrc_blk);
5860 }
5861 break;
5862 case LPFC_RSC_TYPE_FCOE_RPI:
5863 /* RPI bitmask and physical id array are cleaned up earlier. */
5864 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5865 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5866 list_del_init(&rsrc_blk->list);
5867 kfree(rsrc_blk);
5868 }
5869 break;
5870 default:
5871 break;
5872 }
5873
5874 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5875
5876 out_free_mbox:
5877 mempool_free(mbox, phba->mbox_mem_pool);
5878 return rc;
5879}
5880
Baoyou Xiebd4b3e52016-09-25 13:44:55 +08005881static void
James Smart7bdedb32016-07-06 12:36:00 -07005882lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5883 uint32_t feature)
James Smart65791f12016-07-06 12:35:56 -07005884{
James Smart65791f12016-07-06 12:35:56 -07005885 uint32_t len;
James Smart65791f12016-07-06 12:35:56 -07005886
James Smart65791f12016-07-06 12:35:56 -07005887 len = sizeof(struct lpfc_mbx_set_feature) -
5888 sizeof(struct lpfc_sli4_cfg_mhdr);
5889 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5890 LPFC_MBOX_OPCODE_SET_FEATURES, len,
5891 LPFC_SLI4_MBX_EMBED);
James Smart65791f12016-07-06 12:35:56 -07005892
James Smart7bdedb32016-07-06 12:36:00 -07005893 switch (feature) {
5894 case LPFC_SET_UE_RECOVERY:
5895 bf_set(lpfc_mbx_set_feature_UER,
5896 &mbox->u.mqe.un.set_feature, 1);
5897 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5898 mbox->u.mqe.un.set_feature.param_len = 8;
5899 break;
5900 case LPFC_SET_MDS_DIAGS:
5901 bf_set(lpfc_mbx_set_feature_mds,
5902 &mbox->u.mqe.un.set_feature, 1);
5903 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5904 &mbox->u.mqe.un.set_feature, 0);
5905 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5906 mbox->u.mqe.un.set_feature.param_len = 8;
5907 break;
James Smart65791f12016-07-06 12:35:56 -07005908 }
James Smart7bdedb32016-07-06 12:36:00 -07005909
5910 return;
James Smart65791f12016-07-06 12:35:56 -07005911}
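/*
 * Editor's sketch (illustrative only, not driver code): lpfc_set_features()
 * only populates the mailbox; the caller allocates and issues it. A
 * minimal polled use, with status checking trimmed for brevity:
 *
 *	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
 *					      GFP_KERNEL);
 *	if (mboxq) {
 *		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *		if (rc != MBX_TIMEOUT)
 *			mempool_free(mboxq, phba->mbox_mem_pool);
 *	}
 */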
5912
James Smart6d368e52011-05-24 11:44:12 -04005913/**
5914 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5915 * @phba: Pointer to HBA context object.
5916 *
5917 * This function allocates all SLI4 resource identifiers.
5918 **/
5919int
5920lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5921{
5922 int i, rc, error = 0;
5923 uint16_t count, base;
5924 unsigned long longs;
5925
James Smartff78d8f2011-12-13 13:21:35 -05005926 if (!phba->sli4_hba.rpi_hdrs_in_use)
5927 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
James Smart6d368e52011-05-24 11:44:12 -04005928 if (phba->sli4_hba.extents_in_use) {
5929 /*
5930 * The port supports resource extents. The XRI, VPI, VFI, RPI
5931 * resource extent count must be read and allocated before
5932 * provisioning the resource id arrays.
5933 */
5934 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5935 LPFC_IDX_RSRC_RDY) {
5936 /*
5937 * Extent-based resources are set - the driver could
5938 * be in a port reset. Figure out if any corrective
5939 * actions need to be taken.
5940 */
5941 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5942 LPFC_RSC_TYPE_FCOE_VFI);
5943 if (rc != 0)
5944 error++;
5945 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5946 LPFC_RSC_TYPE_FCOE_VPI);
5947 if (rc != 0)
5948 error++;
5949 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5950 LPFC_RSC_TYPE_FCOE_XRI);
5951 if (rc != 0)
5952 error++;
5953 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5954 LPFC_RSC_TYPE_FCOE_RPI);
5955 if (rc != 0)
5956 error++;
5957
5958 /*
5959 * It's possible that the number of resources
5960 * provided to this port instance changed between
5961 * resets. Detect this condition and reallocate
5962 * resources. Otherwise, there is no action.
5963 */
5964 if (error) {
5965 lpfc_printf_log(phba, KERN_INFO,
5966 LOG_MBOX | LOG_INIT,
5967 "2931 Detected extent resource "
5968 "change. Reallocating all "
5969 "extents.\n");
5970 rc = lpfc_sli4_dealloc_extent(phba,
5971 LPFC_RSC_TYPE_FCOE_VFI);
5972 rc = lpfc_sli4_dealloc_extent(phba,
5973 LPFC_RSC_TYPE_FCOE_VPI);
5974 rc = lpfc_sli4_dealloc_extent(phba,
5975 LPFC_RSC_TYPE_FCOE_XRI);
5976 rc = lpfc_sli4_dealloc_extent(phba,
5977 LPFC_RSC_TYPE_FCOE_RPI);
5978 } else
5979 return 0;
5980 }
5981
5982 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5983 if (unlikely(rc))
5984 goto err_exit;
5985
5986 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5987 if (unlikely(rc))
5988 goto err_exit;
5989
5990 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5991 if (unlikely(rc))
5992 goto err_exit;
5993
5994 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5995 if (unlikely(rc))
5996 goto err_exit;
5997 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5998 LPFC_IDX_RSRC_RDY);
5999 return rc;
6000 } else {
6001 /*
6002 * The port does not support resource extents. The XRI, VPI,
6003 * VFI, RPI resource ids were determined from READ_CONFIG.
6004 * Just allocate the bitmasks and provision the resource id
6005 * arrays. If a port reset is active, the resources don't
6006 * need any action - just exit.
6007 */
6008 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
James Smartff78d8f2011-12-13 13:21:35 -05006009 LPFC_IDX_RSRC_RDY) {
6010 lpfc_sli4_dealloc_resource_identifiers(phba);
6011 lpfc_sli4_remove_rpis(phba);
6012 }
James Smart6d368e52011-05-24 11:44:12 -04006013 /* RPIs. */
6014 count = phba->sli4_hba.max_cfg_param.max_rpi;
James Smart0a630c22013-01-03 15:44:09 -05006015 if (count <= 0) {
6016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6017 "3279 Invalid provisioning of "
6018 "rpi:%d\n", count);
6019 rc = -EINVAL;
6020 goto err_exit;
6021 }
James Smart6d368e52011-05-24 11:44:12 -04006022 base = phba->sli4_hba.max_cfg_param.rpi_base;
6023 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6024 phba->sli4_hba.rpi_bmask = kzalloc(longs *
6025 sizeof(unsigned long),
6026 GFP_KERNEL);
6027 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6028 rc = -ENOMEM;
6029 goto err_exit;
6030 }
6031 phba->sli4_hba.rpi_ids = kzalloc(count *
6032 sizeof(uint16_t),
6033 GFP_KERNEL);
6034 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6035 rc = -ENOMEM;
6036 goto free_rpi_bmask;
6037 }
6038
6039 for (i = 0; i < count; i++)
6040 phba->sli4_hba.rpi_ids[i] = base + i;
6041
6042 /* VPIs. */
6043 count = phba->sli4_hba.max_cfg_param.max_vpi;
James Smart0a630c22013-01-03 15:44:09 -05006044 if (count <= 0) {
6045 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6046 "3280 Invalid provisioning of "
6047 "vpi:%d\n", count);
6048 rc = -EINVAL;
6049 goto free_rpi_ids;
6050 }
James Smart6d368e52011-05-24 11:44:12 -04006051 base = phba->sli4_hba.max_cfg_param.vpi_base;
6052 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6053 phba->vpi_bmask = kzalloc(longs *
6054 sizeof(unsigned long),
6055 GFP_KERNEL);
6056 if (unlikely(!phba->vpi_bmask)) {
6057 rc = -ENOMEM;
6058 goto free_rpi_ids;
6059 }
6060 phba->vpi_ids = kzalloc(count *
6061 sizeof(uint16_t),
6062 GFP_KERNEL);
6063 if (unlikely(!phba->vpi_ids)) {
6064 rc = -ENOMEM;
6065 goto free_vpi_bmask;
6066 }
6067
6068 for (i = 0; i < count; i++)
6069 phba->vpi_ids[i] = base + i;
6070
6071 /* XRIs. */
6072 count = phba->sli4_hba.max_cfg_param.max_xri;
James Smart0a630c22013-01-03 15:44:09 -05006073 if (count <= 0) {
6074 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6075 "3281 Invalid provisioning of "
6076 "xri:%d\n", count);
6077 rc = -EINVAL;
6078 goto free_vpi_ids;
6079 }
James Smart6d368e52011-05-24 11:44:12 -04006080 base = phba->sli4_hba.max_cfg_param.xri_base;
6081 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6082 phba->sli4_hba.xri_bmask = kzalloc(longs *
6083 sizeof(unsigned long),
6084 GFP_KERNEL);
6085 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6086 rc = -ENOMEM;
6087 goto free_vpi_ids;
6088 }
James Smart41899be2012-03-01 22:34:19 -05006089 phba->sli4_hba.max_cfg_param.xri_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04006090 phba->sli4_hba.xri_ids = kzalloc(count *
6091 sizeof(uint16_t),
6092 GFP_KERNEL);
6093 if (unlikely(!phba->sli4_hba.xri_ids)) {
6094 rc = -ENOMEM;
6095 goto free_xri_bmask;
6096 }
6097
6098 for (i = 0; i < count; i++)
6099 phba->sli4_hba.xri_ids[i] = base + i;
6100
6101 /* VFIs. */
6102 count = phba->sli4_hba.max_cfg_param.max_vfi;
James Smart0a630c22013-01-03 15:44:09 -05006103 if (count <= 0) {
6104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6105 "3282 Invalid provisioning of "
6106 "vfi:%d\n", count);
6107 rc = -EINVAL;
6108 goto free_xri_ids;
6109 }
James Smart6d368e52011-05-24 11:44:12 -04006110 base = phba->sli4_hba.max_cfg_param.vfi_base;
6111 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6112 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6113 sizeof(unsigned long),
6114 GFP_KERNEL);
6115 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6116 rc = -ENOMEM;
6117 goto free_xri_ids;
6118 }
6119 phba->sli4_hba.vfi_ids = kzalloc(count *
6120 sizeof(uint16_t),
6121 GFP_KERNEL);
6122 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6123 rc = -ENOMEM;
6124 goto free_vfi_bmask;
6125 }
6126
6127 for (i = 0; i < count; i++)
6128 phba->sli4_hba.vfi_ids[i] = base + i;
6129
6130 /*
6131 * Mark all resources ready. An HBA reset doesn't need
6132 * to reset the initialization.
6133 */
6134 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6135 LPFC_IDX_RSRC_RDY);
6136 return 0;
6137 }
6138
6139 free_vfi_bmask:
6140 kfree(phba->sli4_hba.vfi_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006141 phba->sli4_hba.vfi_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006142 free_xri_ids:
6143 kfree(phba->sli4_hba.xri_ids);
Roberto Sassucd60be42017-01-11 11:06:42 +01006144 phba->sli4_hba.xri_ids = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006145 free_xri_bmask:
6146 kfree(phba->sli4_hba.xri_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006147 phba->sli4_hba.xri_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006148 free_vpi_ids:
6149 kfree(phba->vpi_ids);
Roberto Sassucd60be42017-01-11 11:06:42 +01006150 phba->vpi_ids = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006151 free_vpi_bmask:
6152 kfree(phba->vpi_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006153 phba->vpi_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006154 free_rpi_ids:
6155 kfree(phba->sli4_hba.rpi_ids);
Roberto Sassucd60be42017-01-11 11:06:42 +01006156 phba->sli4_hba.rpi_ids = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006157 free_rpi_bmask:
6158 kfree(phba->sli4_hba.rpi_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006159 phba->sli4_hba.rpi_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006160 err_exit:
6161 return rc;
6162}
6163
6164/**
6165 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6166 * @phba: Pointer to HBA context object.
6167 *
 6168 * This function releases the resource identifiers for all resource
 6169 * types, deallocating the extents themselves when extents are in use.
6170 **/
6171int
6172lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6173{
6174 if (phba->sli4_hba.extents_in_use) {
6175 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6176 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6177 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6178 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6179 } else {
6180 kfree(phba->vpi_bmask);
James Smart16a3a202013-04-17 20:14:38 -04006181 phba->sli4_hba.max_cfg_param.vpi_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04006182 kfree(phba->vpi_ids);
6183 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6184 kfree(phba->sli4_hba.xri_bmask);
6185 kfree(phba->sli4_hba.xri_ids);
James Smart6d368e52011-05-24 11:44:12 -04006186 kfree(phba->sli4_hba.vfi_bmask);
6187 kfree(phba->sli4_hba.vfi_ids);
6188 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6189 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6190 }
6191
6192 return 0;
6193}
6194
6195/**
James Smartb76f2dc2011-07-22 18:37:42 -04006196 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6197 * @phba: Pointer to HBA context object.
6198 * @type: The resource extent type.
 6199 * @extnt_cnt: buffer to hold port extent count response
6200 * @extnt_size: buffer to hold port extent size response.
6201 *
6202 * This function calls the port to read the host allocated extents
6203 * for a particular type.
6204 **/
6205int
6206lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6207 uint16_t *extnt_cnt, uint16_t *extnt_size)
6208{
6209 bool emb;
6210 int rc = 0;
6211 uint16_t curr_blks = 0;
6212 uint32_t req_len, emb_len;
6213 uint32_t alloc_len, mbox_tmo;
6214 struct list_head *blk_list_head;
6215 struct lpfc_rsrc_blks *rsrc_blk;
6216 LPFC_MBOXQ_t *mbox;
6217 void *virtaddr = NULL;
6218 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6219 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6220 union lpfc_sli4_cfg_shdr *shdr;
6221
6222 switch (type) {
6223 case LPFC_RSC_TYPE_FCOE_VPI:
6224 blk_list_head = &phba->lpfc_vpi_blk_list;
6225 break;
6226 case LPFC_RSC_TYPE_FCOE_XRI:
6227 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6228 break;
6229 case LPFC_RSC_TYPE_FCOE_VFI:
6230 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6231 break;
6232 case LPFC_RSC_TYPE_FCOE_RPI:
6233 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6234 break;
6235 default:
6236 return -EIO;
6237 }
6238
 6239 /* Count the number of extents currently allocated for this type. */
6240 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6241 if (curr_blks == 0) {
6242 /*
6243 * The GET_ALLOCATED mailbox does not return the size,
6244 * just the count. The size should be just the size
6245 * stored in the current allocated block and all sizes
6246 * for an extent type are the same so set the return
6247 * value now.
6248 */
6249 *extnt_size = rsrc_blk->rsrc_size;
6250 }
6251 curr_blks++;
6252 }
6253
James Smartb76f2dc2011-07-22 18:37:42 -04006254 /*
6255 * Calculate the size of an embedded mailbox. The uint32_t
6256 * accounts for extents-specific word.
6257 */
6258 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6259 sizeof(uint32_t);
6260
6261 /*
6262 * Presume the allocation and response will fit into an embedded
6263 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6264 */
6265 emb = LPFC_SLI4_MBX_EMBED;
6266 req_len = emb_len;
6267 if (req_len > emb_len) {
6268 req_len = curr_blks * sizeof(uint16_t) +
6269 sizeof(union lpfc_sli4_cfg_shdr) +
6270 sizeof(uint32_t);
6271 emb = LPFC_SLI4_MBX_NEMBED;
6272 }
6273
6274 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6275 if (!mbox)
6276 return -ENOMEM;
6277 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6278
6279 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6280 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6281 req_len, emb);
6282 if (alloc_len < req_len) {
6283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6284 "2983 Allocated DMA memory size (x%x) is "
6285 "less than the requested DMA memory "
6286 "size (x%x)\n", alloc_len, req_len);
6287 rc = -ENOMEM;
6288 goto err_exit;
6289 }
6290 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6291 if (unlikely(rc)) {
6292 rc = -EIO;
6293 goto err_exit;
6294 }
6295
6296 if (!phba->sli4_hba.intr_enable)
6297 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6298 else {
James Smarta183a152011-10-10 21:32:43 -04006299 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smartb76f2dc2011-07-22 18:37:42 -04006300 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6301 }
6302
6303 if (unlikely(rc)) {
6304 rc = -EIO;
6305 goto err_exit;
6306 }
6307
6308 /*
6309 * Figure out where the response is located. Then get local pointers
 6310 * to the response data. The port is not guaranteed to honor the full
 6311 * extent count requested, so update the local variable with the
 6312 * count actually allocated by the port.
6313 */
6314 if (emb == LPFC_SLI4_MBX_EMBED) {
6315 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6316 shdr = &rsrc_ext->header.cfg_shdr;
6317 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6318 } else {
6319 virtaddr = mbox->sge_array->addr[0];
6320 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6321 shdr = &n_rsrc->cfg_shdr;
6322 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6323 }
6324
6325 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6326 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6327 "2984 Failed to read allocated resources "
6328 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6329 type,
6330 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6331 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6332 rc = -EIO;
6333 goto err_exit;
6334 }
6335 err_exit:
6336 lpfc_sli4_mbox_cmd_free(phba, mbox);
6337 return rc;
6338}
6339
6340/**
James Smart0ef69962017-04-21 16:04:50 -07006341 * lpfc_sli4_repost_sgl_list - Repost the buffers' sgl pages as a block
James Smart8a9d2e82012-05-09 21:16:12 -04006342 * @phba: pointer to lpfc hba data structure.
James Smart895427b2017-02-12 13:52:30 -08006343 * @sgl_list: linked list of sgl buffers to post
6345 * @cnt: number of linked list buffers
James Smart8a9d2e82012-05-09 21:16:12 -04006346 *
James Smart895427b2017-02-12 13:52:30 -08006347 * This routine walks the list of buffers that have been allocated and
James Smart8a9d2e82012-05-09 21:16:12 -04006348 * reposts them to the port by using SGL block post. This is needed after a
6349 * pci_function_reset/warm_start or start. It attempts to construct blocks
James Smart895427b2017-02-12 13:52:30 -08006350 * of buffer sgls which contain contiguous xris and uses the non-embedded
6351 * SGL block post mailbox commands to post them to the port. For single
James Smart8a9d2e82012-05-09 21:16:12 -04006352 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
6353 * mailbox command for posting.
6354 *
6355 * Returns: 0 = success, non-zero failure.
6356 **/
6357static int
James Smart895427b2017-02-12 13:52:30 -08006358lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6359 struct list_head *sgl_list, int cnt)
James Smart8a9d2e82012-05-09 21:16:12 -04006360{
6361 struct lpfc_sglq *sglq_entry = NULL;
6362 struct lpfc_sglq *sglq_entry_next = NULL;
6363 struct lpfc_sglq *sglq_entry_first = NULL;
James Smart895427b2017-02-12 13:52:30 -08006364 int status, total_cnt;
6365 int post_cnt = 0, num_posted = 0, block_cnt = 0;
James Smart8a9d2e82012-05-09 21:16:12 -04006366 int last_xritag = NO_XRI;
6367 LIST_HEAD(prep_sgl_list);
6368 LIST_HEAD(blck_sgl_list);
6369 LIST_HEAD(allc_sgl_list);
6370 LIST_HEAD(post_sgl_list);
6371 LIST_HEAD(free_sgl_list);
6372
James Smart38c20672013-03-01 16:37:44 -05006373 spin_lock_irq(&phba->hbalock);
James Smart895427b2017-02-12 13:52:30 -08006374 spin_lock(&phba->sli4_hba.sgl_list_lock);
6375 list_splice_init(sgl_list, &allc_sgl_list);
6376 spin_unlock(&phba->sli4_hba.sgl_list_lock);
James Smart38c20672013-03-01 16:37:44 -05006377 spin_unlock_irq(&phba->hbalock);
James Smart8a9d2e82012-05-09 21:16:12 -04006378
James Smart895427b2017-02-12 13:52:30 -08006379 total_cnt = cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04006380 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6381 &allc_sgl_list, list) {
6382 list_del_init(&sglq_entry->list);
6383 block_cnt++;
6384 if ((last_xritag != NO_XRI) &&
6385 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6386 /* a hole in xri block, form a sgl posting block */
6387 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6388 post_cnt = block_cnt - 1;
6389 /* prepare list for next posting block */
6390 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6391 block_cnt = 1;
6392 } else {
6393 /* prepare list for next posting block */
6394 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6395 /* enough sgls for non-embed sgl mbox command */
6396 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6397 list_splice_init(&prep_sgl_list,
6398 &blck_sgl_list);
6399 post_cnt = block_cnt;
6400 block_cnt = 0;
6401 }
6402 }
6403 num_posted++;
6404
6405 /* keep track of last sgl's xritag */
6406 last_xritag = sglq_entry->sli4_xritag;
6407
James Smart895427b2017-02-12 13:52:30 -08006408 /* end of repost sgl list condition for buffers */
6409 if (num_posted == total_cnt) {
James Smart8a9d2e82012-05-09 21:16:12 -04006410 if (post_cnt == 0) {
6411 list_splice_init(&prep_sgl_list,
6412 &blck_sgl_list);
6413 post_cnt = block_cnt;
6414 } else if (block_cnt == 1) {
6415 status = lpfc_sli4_post_sgl(phba,
6416 sglq_entry->phys, 0,
6417 sglq_entry->sli4_xritag);
6418 if (!status) {
6419 /* successful, put sgl to posted list */
6420 list_add_tail(&sglq_entry->list,
6421 &post_sgl_list);
6422 } else {
6423 /* Failure, put sgl to free list */
6424 lpfc_printf_log(phba, KERN_WARNING,
6425 LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08006426 "3159 Failed to post "
James Smart8a9d2e82012-05-09 21:16:12 -04006427 "sgl, xritag:x%x\n",
6428 sglq_entry->sli4_xritag);
6429 list_add_tail(&sglq_entry->list,
6430 &free_sgl_list);
James Smart711ea882013-04-17 20:18:29 -04006431 total_cnt--;
James Smart8a9d2e82012-05-09 21:16:12 -04006432 }
6433 }
6434 }
6435
6436 /* continue until a nembed page worth of sgls */
6437 if (post_cnt == 0)
6438 continue;
6439
James Smart895427b2017-02-12 13:52:30 -08006440 /* post the buffer list sgls as a block */
6441 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6442 post_cnt);
James Smart8a9d2e82012-05-09 21:16:12 -04006443
6444 if (!status) {
6445 /* success, put sgl list to posted sgl list */
6446 list_splice_init(&blck_sgl_list, &post_sgl_list);
6447 } else {
6448 /* Failure, put sgl list to free sgl list */
6449 sglq_entry_first = list_first_entry(&blck_sgl_list,
6450 struct lpfc_sglq,
6451 list);
6452 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08006453 "3160 Failed to post sgl-list, "
James Smart8a9d2e82012-05-09 21:16:12 -04006454 "xritag:x%x-x%x\n",
6455 sglq_entry_first->sli4_xritag,
6456 (sglq_entry_first->sli4_xritag +
6457 post_cnt - 1));
6458 list_splice_init(&blck_sgl_list, &free_sgl_list);
James Smart711ea882013-04-17 20:18:29 -04006459 total_cnt -= post_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04006460 }
6461
 6462 /* don't reset xritag due to hole in xri block */
6463 if (block_cnt == 0)
6464 last_xritag = NO_XRI;
6465
James Smart895427b2017-02-12 13:52:30 -08006466 /* reset sgl post count for next round of posting */
James Smart8a9d2e82012-05-09 21:16:12 -04006467 post_cnt = 0;
6468 }
6469
James Smart895427b2017-02-12 13:52:30 -08006470	/* free the sgls that failed to post */
James Smart8a9d2e82012-05-09 21:16:12 -04006471 lpfc_free_sgl_list(phba, &free_sgl_list);
6472
James Smart895427b2017-02-12 13:52:30 -08006473 /* push sgls posted to the available list */
James Smart8a9d2e82012-05-09 21:16:12 -04006474 if (!list_empty(&post_sgl_list)) {
James Smart38c20672013-03-01 16:37:44 -05006475 spin_lock_irq(&phba->hbalock);
James Smart895427b2017-02-12 13:52:30 -08006476 spin_lock(&phba->sli4_hba.sgl_list_lock);
6477 list_splice_init(&post_sgl_list, sgl_list);
6478 spin_unlock(&phba->sli4_hba.sgl_list_lock);
James Smart38c20672013-03-01 16:37:44 -05006479 spin_unlock_irq(&phba->hbalock);
James Smart8a9d2e82012-05-09 21:16:12 -04006480 } else {
6481 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08006482 "3161 Failure to post sgl to port.\n");
James Smart8a9d2e82012-05-09 21:16:12 -04006483 return -EIO;
6484 }
James Smart895427b2017-02-12 13:52:30 -08006485
6486 /* return the number of XRIs actually posted */
6487 return total_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04006488}
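/*
 * Worked example of the block-posting loop above (the xritag values here are
 * hypothetical): given sgl entries with xritags 0x10, 0x11, 0x12 and 0x14,
 * the hole at 0x13 closes the first block, so 0x10-0x12 are posted together
 * through lpfc_sli4_post_sgl_list() (post_cnt = 3) while a new block starts
 * at 0x14; a block left with a single entry at the end of the list is posted
 * individually through lpfc_sli4_post_sgl().
 */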
6489
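/**
 * lpfc_set_host_data - Prepare a SET_HOST_DATA mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the mailbox object to populate.
 *
 * Builds, but does not issue, a SET_HOST_DATA mailbox command carrying the
 * host OS and driver version string; the caller issues the prepared mailbox,
 * as lpfc_sli4_hba_setup() does below.
 **/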
James Smart61bda8f2016-10-13 15:06:05 -07006490void
6491lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6492{
6493 uint32_t len;
6494
6495 len = sizeof(struct lpfc_mbx_set_host_data) -
6496 sizeof(struct lpfc_sli4_cfg_mhdr);
6497 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6498 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6499 LPFC_SLI4_MBX_EMBED);
6500
6501 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
James Smartb2fd1032016-12-19 15:07:21 -08006502 mbox->u.mqe.un.set_host_data.param_len =
6503 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
James Smart61bda8f2016-10-13 15:06:05 -07006504 snprintf(mbox->u.mqe.un.set_host_data.data,
6505 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6506 "Linux %s v"LPFC_DRIVER_VERSION,
6507 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6508}
6509
James Smart8a9d2e82012-05-09 21:16:12 -04006510/**
Masahiro Yamada183b8022017-02-27 14:29:20 -08006511 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
James Smartda0436e2009-05-22 14:51:39 -04006512 * @phba: Pointer to HBA context object.
6513 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08006514 * This function is the main SLI4 device initialization PCI function. This
6515 * function is called by the HBA initialization code, HBA reset code and
James Smartda0436e2009-05-22 14:51:39 -04006516 * HBA error attention handler code. Caller is not required to hold any
6517 * locks.
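 *
 * Returns zero on success.  Error paths return a negative errno code or,
 * for some mailbox-level failures, pass back the nonzero mailbox status.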
6518 **/
6519int
6520lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6521{
James Smart2d7dbc42017-02-12 13:52:35 -08006522 int rc, i;
James Smartda0436e2009-05-22 14:51:39 -04006523 LPFC_MBOXQ_t *mboxq;
6524 struct lpfc_mqe *mqe;
6525 uint8_t *vpd;
6526 uint32_t vpd_size;
6527 uint32_t ftr_rsp = 0;
6528 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6529 struct lpfc_vport *vport = phba->pport;
6530 struct lpfc_dmabuf *mp;
James Smart2d7dbc42017-02-12 13:52:35 -08006531 struct lpfc_rqb *rqbp;
James Smartda0436e2009-05-22 14:51:39 -04006532
6533 /* Perform a PCI function reset to start from clean */
6534 rc = lpfc_pci_function_reset(phba);
6535 if (unlikely(rc))
6536 return -ENODEV;
6537
6538	/* Check the HBA Host Status Register for readiness */
6539 rc = lpfc_sli4_post_status_check(phba);
6540 if (unlikely(rc))
6541 return -ENODEV;
6542 else {
6543 spin_lock_irq(&phba->hbalock);
6544 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6545 spin_unlock_irq(&phba->hbalock);
6546 }
6547
6548 /*
6549 * Allocate a single mailbox container for initializing the
6550 * port.
6551 */
6552 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6553 if (!mboxq)
6554 return -ENOMEM;
6555
James Smartda0436e2009-05-22 14:51:39 -04006556 /* Issue READ_REV to collect vpd and FW information. */
James Smart49198b32010-04-06 15:04:33 -04006557 vpd_size = SLI4_PAGE_SIZE;
James Smartda0436e2009-05-22 14:51:39 -04006558 vpd = kzalloc(vpd_size, GFP_KERNEL);
6559 if (!vpd) {
6560 rc = -ENOMEM;
6561 goto out_free_mbox;
6562 }
6563
6564 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
James Smart76a95d72010-11-20 23:11:48 -05006565 if (unlikely(rc)) {
6566 kfree(vpd);
6567 goto out_free_mbox;
6568 }
James Smart572709e2013-07-15 18:32:43 -04006569
James Smartda0436e2009-05-22 14:51:39 -04006570 mqe = &mboxq->u.mqe;
James Smartf1126682009-06-10 17:22:44 -04006571 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
James Smartb5c53952016-03-31 14:12:30 -07006572 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
James Smart76a95d72010-11-20 23:11:48 -05006573 phba->hba_flag |= HBA_FCOE_MODE;
James Smartb5c53952016-03-31 14:12:30 -07006574 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6575 } else {
James Smart76a95d72010-11-20 23:11:48 -05006576 phba->hba_flag &= ~HBA_FCOE_MODE;
James Smartb5c53952016-03-31 14:12:30 -07006577 }
James Smart45ed1192009-10-02 15:17:02 -04006578
6579 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6580 LPFC_DCBX_CEE_MODE)
6581 phba->hba_flag |= HBA_FIP_SUPPORT;
6582 else
6583 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6584
James Smart4f2e66c2012-05-09 21:17:07 -04006585 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6586
James Smartc31098c2011-04-16 11:03:33 -04006587 if (phba->sli_rev != LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -04006588 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6589 "0376 READ_REV Error. SLI Level %d "
6590 "FCoE enabled %d\n",
James Smart76a95d72010-11-20 23:11:48 -05006591 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
James Smartda0436e2009-05-22 14:51:39 -04006592 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05006593 kfree(vpd);
6594 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006595 }
James Smartcd1c8302011-10-10 21:33:25 -04006596
6597 /*
James Smartff78d8f2011-12-13 13:21:35 -05006598	 * Continue initialization with default values even if the driver failed
6599	 * to read the FCoE param config regions; only read the parameters if
6600	 * the board is FCoE
6601 */
6602 if (phba->hba_flag & HBA_FCOE_MODE &&
6603 lpfc_sli4_read_fcoe_params(phba))
6604 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6605 "2570 Failed to read FCoE parameters\n");
6606
6607 /*
James Smartcd1c8302011-10-10 21:33:25 -04006608	 * Retrieve the SLI4 device physical port name; failure to do so
6609	 * is considered non-fatal.
6610 */
6611 rc = lpfc_sli4_retrieve_pport_name(phba);
6612 if (!rc)
6613 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6614 "3080 Successful retrieving SLI4 device "
6615 "physical port name: %s.\n", phba->Port);
6616
James Smartda0436e2009-05-22 14:51:39 -04006617 /*
6618 * Evaluate the read rev and vpd data. Populate the driver
6619 * state with the results. If this routine fails, the failure
6620 * is not fatal as the driver will use generic values.
6621 */
6622 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6623 if (unlikely(!rc)) {
6624 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6625 "0377 Error %d parsing vpd. "
6626 "Using defaults.\n", rc);
6627 rc = 0;
6628 }
James Smart76a95d72010-11-20 23:11:48 -05006629 kfree(vpd);
James Smartda0436e2009-05-22 14:51:39 -04006630
James Smartf1126682009-06-10 17:22:44 -04006631 /* Save information as VPD data */
6632 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6633 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6634 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6635 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6636 &mqe->un.read_rev);
6637 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6638 &mqe->un.read_rev);
6639 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6640 &mqe->un.read_rev);
6641 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6642 &mqe->un.read_rev);
6643 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6644 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6645 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6646 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6647 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6648 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6649 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6650 "(%d):0380 READ_REV Status x%x "
6651 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6652 mboxq->vport ? mboxq->vport->vpi : 0,
6653 bf_get(lpfc_mqe_status, mqe),
6654 phba->vpd.rev.opFwName,
6655 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6656 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
James Smartda0436e2009-05-22 14:51:39 -04006657
James Smart572709e2013-07-15 18:32:43 -04006658 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6659 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6660 if (phba->pport->cfg_lun_queue_depth > rc) {
6661 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6662 "3362 LUN queue depth changed from %d to %d\n",
6663 phba->pport->cfg_lun_queue_depth, rc);
6664 phba->pport->cfg_lun_queue_depth = rc;
6665 }
6666
James Smart65791f12016-07-06 12:35:56 -07006667 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
James Smart7bdedb32016-07-06 12:36:00 -07006668 LPFC_SLI_INTF_IF_TYPE_0) {
6669 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6670 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6671 if (rc == MBX_SUCCESS) {
6672 phba->hba_flag |= HBA_RECOVERABLE_UE;
6673 /* Set 1Sec interval to detect UE */
6674 phba->eratt_poll_interval = 1;
6675 phba->sli4_hba.ue_to_sr = bf_get(
6676 lpfc_mbx_set_feature_UESR,
6677 &mboxq->u.mqe.un.set_feature);
6678 phba->sli4_hba.ue_to_rp = bf_get(
6679 lpfc_mbx_set_feature_UERP,
6680 &mboxq->u.mqe.un.set_feature);
6681 }
6682 }
6683
6684 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6685 /* Enable MDS Diagnostics only if the SLI Port supports it */
6686 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6687 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6688 if (rc != MBX_SUCCESS)
6689 phba->mds_diags_support = 0;
6690 }
James Smart572709e2013-07-15 18:32:43 -04006691
James Smartda0436e2009-05-22 14:51:39 -04006692 /*
6693 * Discover the port's supported feature set and match it against the
6694	 * host's requests.
6695 */
6696 lpfc_request_features(phba, mboxq);
6697 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6698 if (unlikely(rc)) {
6699 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05006700 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006701 }
6702
6703 /*
6704 * The port must support FCP initiator mode as this is the
6705 * only mode running in the host.
6706 */
6707 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6708 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6709 "0378 No support for fcpi mode.\n");
6710 ftr_rsp++;
6711 }
James Smartfedd3b72011-02-16 12:39:24 -05006712 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6713 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6714 else
6715 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
James Smartda0436e2009-05-22 14:51:39 -04006716 /*
6717 * If the port cannot support the host's requested features
6718 * then turn off the global config parameters to disable the
6719 * feature in the driver. This is not a fatal error.
6720 */
James Smartbf086112011-08-21 21:48:13 -04006721 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6722 if (phba->cfg_enable_bg) {
6723 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6724 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6725 else
6726 ftr_rsp++;
6727 }
James Smartda0436e2009-05-22 14:51:39 -04006728
6729 if (phba->max_vpi && phba->cfg_enable_npiv &&
6730 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6731 ftr_rsp++;
6732
6733 if (ftr_rsp) {
6734 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6735 "0379 Feature Mismatch Data: x%08x %08x "
6736 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6737 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6738 phba->cfg_enable_npiv, phba->max_vpi);
6739 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6740 phba->cfg_enable_bg = 0;
6741 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6742 phba->cfg_enable_npiv = 0;
6743 }
6744
6745 /* These SLI3 features are assumed in SLI4 */
6746 spin_lock_irq(&phba->hbalock);
6747 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6748 spin_unlock_irq(&phba->hbalock);
6749
James Smart6d368e52011-05-24 11:44:12 -04006750 /*
6751 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
6752	 * calls depend on these resources to complete port setup.
6753 */
6754 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6755 if (rc) {
6756 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6757 "2920 Failed to alloc Resource IDs "
6758 "rc = x%x\n", rc);
6759 goto out_free_mbox;
6760 }
6761
James Smart61bda8f2016-10-13 15:06:05 -07006762 lpfc_set_host_data(phba, mboxq);
6763
6764 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6765 if (rc) {
6766 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6767 "2134 Failed to set host os driver version %x",
6768 rc);
6769 }
6770
James Smartda0436e2009-05-22 14:51:39 -04006771 /* Read the port's service parameters. */
James Smart9f1177a2010-02-26 14:12:57 -05006772 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6773 if (rc) {
6774 phba->link_state = LPFC_HBA_ERROR;
6775 rc = -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -05006776 goto out_free_mbox;
James Smart9f1177a2010-02-26 14:12:57 -05006777 }
6778
James Smartda0436e2009-05-22 14:51:39 -04006779 mboxq->vport = vport;
6780 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6781 mp = (struct lpfc_dmabuf *) mboxq->context1;
6782 if (rc == MBX_SUCCESS) {
6783 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6784 rc = 0;
6785 }
6786
6787 /*
6788 * This memory was allocated by the lpfc_read_sparam routine. Release
6789 * it to the mbuf pool.
6790 */
6791 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6792 kfree(mp);
6793 mboxq->context1 = NULL;
6794 if (unlikely(rc)) {
6795 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6796 "0382 READ_SPARAM command failed "
6797 "status %d, mbxStatus x%x\n",
6798 rc, bf_get(lpfc_mqe_status, mqe));
6799 phba->link_state = LPFC_HBA_ERROR;
6800 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05006801 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006802 }
6803
James Smart05580562011-05-24 11:40:48 -04006804 lpfc_update_vport_wwn(vport);
James Smartda0436e2009-05-22 14:51:39 -04006805
6806 /* Update the fc_host data structures with new wwn. */
6807 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6808 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6809
James Smart895427b2017-02-12 13:52:30 -08006810 /* Create all the SLI4 queues */
6811 rc = lpfc_sli4_queue_create(phba);
6812 if (rc) {
6813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6814 "3089 Failed to allocate queues\n");
6815 rc = -ENODEV;
6816 goto out_free_mbox;
6817 }
6818 /* Set up all the queues to the device */
6819 rc = lpfc_sli4_queue_setup(phba);
6820 if (unlikely(rc)) {
6821 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6822 "0381 Error %d during queue setup.\n ", rc);
6823 goto out_stop_timers;
6824 }
6825 /* Initialize the driver internal SLI layer lists. */
6826 lpfc_sli4_setup(phba);
6827 lpfc_sli4_queue_init(phba);
6828
6829 /* update host els xri-sgl sizes and mappings */
6830 rc = lpfc_sli4_els_sgl_update(phba);
James Smart8a9d2e82012-05-09 21:16:12 -04006831 if (unlikely(rc)) {
6832 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6833 "1400 Failed to update xri-sgl size and "
6834 "mapping: %d\n", rc);
James Smart895427b2017-02-12 13:52:30 -08006835 goto out_destroy_queue;
James Smartda0436e2009-05-22 14:51:39 -04006836 }
6837
James Smart8a9d2e82012-05-09 21:16:12 -04006838 /* register the els sgl pool to the port */
James Smart895427b2017-02-12 13:52:30 -08006839 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
6840 phba->sli4_hba.els_xri_cnt);
6841 if (unlikely(rc < 0)) {
James Smart8a9d2e82012-05-09 21:16:12 -04006842 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6843 "0582 Error %d during els sgl post "
6844 "operation\n", rc);
6845 rc = -ENODEV;
James Smart895427b2017-02-12 13:52:30 -08006846 goto out_destroy_queue;
6847 }
6848 phba->sli4_hba.els_xri_cnt = rc;
6849
James Smartf358dd02017-02-12 13:52:34 -08006850 if (phba->nvmet_support) {
6851 /* update host nvmet xri-sgl sizes and mappings */
6852 rc = lpfc_sli4_nvmet_sgl_update(phba);
6853 if (unlikely(rc)) {
6854 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6855 "6308 Failed to update nvmet-sgl size "
6856 "and mapping: %d\n", rc);
6857 goto out_destroy_queue;
6858 }
6859
6860 /* register the nvmet sgl pool to the port */
6861 rc = lpfc_sli4_repost_sgl_list(
6862 phba,
6863 &phba->sli4_hba.lpfc_nvmet_sgl_list,
6864 phba->sli4_hba.nvmet_xri_cnt);
6865 if (unlikely(rc < 0)) {
6866 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6867 "3117 Error %d during nvmet "
6868 "sgl post\n", rc);
6869 rc = -ENODEV;
6870 goto out_destroy_queue;
6871 }
6872 phba->sli4_hba.nvmet_xri_cnt = rc;
James Smartd613b6a2017-02-12 13:52:37 -08006873 lpfc_nvmet_create_targetport(phba);
James Smartf358dd02017-02-12 13:52:34 -08006874 } else {
James Smart895427b2017-02-12 13:52:30 -08006875 /* update host scsi xri-sgl sizes and mappings */
6876 rc = lpfc_sli4_scsi_sgl_update(phba);
6877 if (unlikely(rc)) {
6878 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6879 "6309 Failed to update scsi-sgl size "
6880 "and mapping: %d\n", rc);
6881 goto out_destroy_queue;
6882 }
6883
6884 /* update host nvme xri-sgl sizes and mappings */
6885 rc = lpfc_sli4_nvme_sgl_update(phba);
6886 if (unlikely(rc)) {
6887 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6888 "6082 Failed to update nvme-sgl size "
6889 "and mapping: %d\n", rc);
6890 goto out_destroy_queue;
6891 }
James Smart8a9d2e82012-05-09 21:16:12 -04006892 }
6893
James Smart2d7dbc42017-02-12 13:52:35 -08006894 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
6895
6896 /* Post initial buffers to all RQs created */
6897 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
6898 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
6899 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
6900 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
6901 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
6902 rqbp->entry_count = 256;
6903 rqbp->buffer_count = 0;
6904
6905			/* Divide by 4 and round down to a multiple of 8 */
6906 rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8;
6907 phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc;
6908 phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc;
6909
6910 lpfc_post_rq_buffer(
6911 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
6912 phba->sli4_hba.nvmet_mrq_data[i],
6913 phba->cfg_nvmet_mrq_post);
6914 }
6915 }
6916
James Smart895427b2017-02-12 13:52:30 -08006917 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6918 /* register the allocated scsi sgl pool to the port */
6919 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6920 if (unlikely(rc)) {
6921 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6922 "0383 Error %d during scsi sgl post "
6923 "operation\n", rc);
6924 /* Some Scsi buffers were moved to abort scsi list */
6925 /* A pci function reset will repost them */
6926 rc = -ENODEV;
6927 goto out_destroy_queue;
6928 }
James Smartda0436e2009-05-22 14:51:39 -04006929 }
6930
James Smart01649562017-02-12 13:52:32 -08006931 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
6932 (phba->nvmet_support == 0)) {
6933
6934 /* register the allocated nvme sgl pool to the port */
6935 rc = lpfc_repost_nvme_sgl_list(phba);
6936 if (unlikely(rc)) {
6937 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6938 "6116 Error %d during nvme sgl post "
6939 "operation\n", rc);
6940 /* Some NVME buffers were moved to abort nvme list */
6941 /* A pci function reset will repost them */
6942 rc = -ENODEV;
6943 goto out_destroy_queue;
6944 }
James Smartda0436e2009-05-22 14:51:39 -04006945 }
6946
6947 /* Post the rpi header region to the device. */
6948 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6949 if (unlikely(rc)) {
6950 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6951 "0393 Error %d during rpi post operation\n",
6952 rc);
6953 rc = -ENODEV;
James Smart895427b2017-02-12 13:52:30 -08006954 goto out_destroy_queue;
James Smartda0436e2009-05-22 14:51:39 -04006955 }
James Smart97f2ecf2012-03-01 22:35:23 -05006956 lpfc_sli4_node_prep(phba);
James Smartda0436e2009-05-22 14:51:39 -04006957
James Smart895427b2017-02-12 13:52:30 -08006958 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
James Smart2d7dbc42017-02-12 13:52:35 -08006959 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
James Smart895427b2017-02-12 13:52:30 -08006960 /*
6961 * The FC Port needs to register FCFI (index 0)
6962 */
6963 lpfc_reg_fcfi(phba, mboxq);
6964 mboxq->vport = phba->pport;
6965 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6966 if (rc != MBX_SUCCESS)
6967 goto out_unset_queue;
6968 rc = 0;
6969 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6970 &mboxq->u.mqe.un.reg_fcfi);
James Smart2d7dbc42017-02-12 13:52:35 -08006971 } else {
6972			/* We are in NVME Target mode with MRQ > 1 */
6973
6974 /* First register the FCFI */
6975 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
6976 mboxq->vport = phba->pport;
6977 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6978 if (rc != MBX_SUCCESS)
6979 goto out_unset_queue;
6980 rc = 0;
6981 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
6982 &mboxq->u.mqe.un.reg_fcfi_mrq);
6983
6984 /* Next register the MRQs */
6985 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
6986 mboxq->vport = phba->pport;
6987 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6988 if (rc != MBX_SUCCESS)
6989 goto out_unset_queue;
6990 rc = 0;
James Smart895427b2017-02-12 13:52:30 -08006991 }
6992 /* Check if the port is configured to be disabled */
6993 lpfc_sli_read_link_ste(phba);
James Smartda0436e2009-05-22 14:51:39 -04006994 }
6995
6996 /* Arm the CQs and then EQs on device */
6997 lpfc_sli4_arm_cqeq_intr(phba);
6998
6999 /* Indicate device interrupt mode */
7000 phba->sli4_hba.intr_enable = 1;
7001
7002 /* Allow asynchronous mailbox command to go through */
7003 spin_lock_irq(&phba->hbalock);
7004 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7005 spin_unlock_irq(&phba->hbalock);
7006
7007 /* Post receive buffers to the device */
7008 lpfc_sli4_rb_setup(phba);
7009
James Smartfc2b9892010-02-26 14:15:29 -05007010 /* Reset HBA FCF states after HBA reset */
7011 phba->fcf.fcf_flag = 0;
7012 phba->fcf.current_rec.flag = 0;
7013
James Smartda0436e2009-05-22 14:51:39 -04007014 /* Start the ELS watchdog timer */
James Smart8fa38512009-07-19 10:01:03 -04007015 mod_timer(&vport->els_tmofunc,
James Smart256ec0d2013-04-17 20:14:58 -04007016 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
James Smartda0436e2009-05-22 14:51:39 -04007017
7018 /* Start heart beat timer */
7019 mod_timer(&phba->hb_tmofunc,
James Smart256ec0d2013-04-17 20:14:58 -04007020 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
James Smartda0436e2009-05-22 14:51:39 -04007021 phba->hb_outstanding = 0;
7022 phba->last_completion_time = jiffies;
7023
7024 /* Start error attention (ERATT) polling timer */
James Smart256ec0d2013-04-17 20:14:58 -04007025 mod_timer(&phba->eratt_poll,
James Smart65791f12016-07-06 12:35:56 -07007026 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
James Smartda0436e2009-05-22 14:51:39 -04007027
James Smart75baf692010-06-08 18:31:21 -04007028 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7029 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7030 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7031 if (!rc) {
7032 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7033 "2829 This device supports "
7034 "Advanced Error Reporting (AER)\n");
7035 spin_lock_irq(&phba->hbalock);
7036 phba->hba_flag |= HBA_AER_ENABLED;
7037 spin_unlock_irq(&phba->hbalock);
7038 } else {
7039 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7040 "2830 This device does not support "
7041 "Advanced Error Reporting (AER)\n");
7042 phba->cfg_aer_support = 0;
7043 }
James Smart0a96e972011-07-22 18:37:28 -04007044 rc = 0;
James Smart75baf692010-06-08 18:31:21 -04007045 }
7046
James Smartda0436e2009-05-22 14:51:39 -04007047 /*
7048 * The port is ready, set the host's link state to LINK_DOWN
7049 * in preparation for link interrupts.
7050 */
James Smartda0436e2009-05-22 14:51:39 -04007051 spin_lock_irq(&phba->hbalock);
7052 phba->link_state = LPFC_LINK_DOWN;
7053 spin_unlock_irq(&phba->hbalock);
James Smart026abb82011-12-13 13:20:45 -05007054 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7055 (phba->hba_flag & LINK_DISABLED)) {
7056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7057 "3103 Adapter Link is disabled.\n");
7058 lpfc_down_link(phba, mboxq);
7059 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7060 if (rc != MBX_SUCCESS) {
7061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7062 "3104 Adapter failed to issue "
7063 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7064 goto out_unset_queue;
7065 }
7066 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
James Smart1b511972011-12-13 13:23:09 -05007067 /* don't perform init_link on SLI4 FC port loopback test */
7068 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7069 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7070 if (rc)
7071 goto out_unset_queue;
7072 }
James Smart5350d872011-10-10 21:33:49 -04007073 }
7074 mempool_free(mboxq, phba->mbox_mem_pool);
7075 return rc;
James Smart76a95d72010-11-20 23:11:48 -05007076out_unset_queue:
James Smartda0436e2009-05-22 14:51:39 -04007077 /* Unset all the queues set up in this routine when error out */
James Smart5350d872011-10-10 21:33:49 -04007078 lpfc_sli4_queue_unset(phba);
7079out_destroy_queue:
7080 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -04007081out_stop_timers:
James Smart5350d872011-10-10 21:33:49 -04007082 lpfc_stop_hba_timers(phba);
James Smartda0436e2009-05-22 14:51:39 -04007083out_free_mbox:
7084 mempool_free(mboxq, phba->mbox_mem_pool);
7085 return rc;
7086}
James Smarte59058c2008-08-24 21:49:00 -04007087
7088/**
James Smart3621a712009-04-06 18:47:14 -04007089 * lpfc_mbox_timeout - Timeout callback function for the mbox timer
James Smarte59058c2008-08-24 21:49:00 -04007090 * @ptr: context object - pointer to hba structure.
dea31012005-04-17 16:05:31 -05007091 *
James Smarte59058c2008-08-24 21:49:00 -04007092 * This is the callback function for the mailbox timer. The mailbox
7093 * timer is armed when a new mailbox command is issued and the timer
7094 * is deleted when the mailbox completes. The function is called by
7095 * the kernel timer code when a mailbox does not complete within
7096 * expected time. This function wakes up the worker thread to
7097 * process the mailbox timeout and returns. All the processing is
7098 * done by the worker thread function lpfc_mbox_timeout_handler.
7099 **/
dea31012005-04-17 16:05:31 -05007100void
7101lpfc_mbox_timeout(unsigned long ptr)
7102{
James Smart92d7f7b2007-06-17 19:56:38 -05007103 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
dea31012005-04-17 16:05:31 -05007104 unsigned long iflag;
James Smart2e0fef82007-06-17 19:56:36 -05007105 uint32_t tmo_posted;
dea31012005-04-17 16:05:31 -05007106
James Smart2e0fef82007-06-17 19:56:36 -05007107 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -05007108 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
James Smart2e0fef82007-06-17 19:56:36 -05007109 if (!tmo_posted)
7110 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7111 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7112
James Smart5e9d9b82008-06-14 22:52:53 -04007113 if (!tmo_posted)
7114 lpfc_worker_wake_up(phba);
7115 return;
dea31012005-04-17 16:05:31 -05007116}
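/*
 * For reference, the arming described above follows the pattern used later
 * in this file (for example in lpfc_sli_issue_mbox_s3()) when a command is
 * issued:
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 */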
7117
James Smarte8d3c3b2013-10-10 12:21:30 -04007118/**
7119 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7120 * are pending
7121 * @phba: Pointer to HBA context object.
7122 *
7123 * This function checks if any mailbox completions are present on the mailbox
7124 * completion queue.
7125 **/
Nicholas Krause3bb11fc2015-08-31 16:48:13 -04007126static bool
James Smarte8d3c3b2013-10-10 12:21:30 -04007127lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7128{
7129
7130 uint32_t idx;
7131 struct lpfc_queue *mcq;
7132 struct lpfc_mcqe *mcqe;
7133 bool pending_completions = false;
7134
7135 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7136 return false;
7137
7138 /* Check for completions on mailbox completion queue */
7139
7140 mcq = phba->sli4_hba.mbx_cq;
7141 idx = mcq->hba_index;
7142 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
7143 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7144 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7145 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7146 pending_completions = true;
7147 break;
7148 }
7149 idx = (idx + 1) % mcq->entry_count;
7150 if (mcq->hba_index == idx)
7151 break;
7152 }
7153 return pending_completions;
7154
7155}
7156
7157/**
7158 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7159 * that were missed.
7160 * @phba: Pointer to HBA context object.
7161 *
7162 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7163 * may be missed, causing erroneous mailbox timeouts to occur. This function
7164 * checks to see if mbox completions are on the mailbox completion queue
7165 * and will process all the completions associated with the eq for the
7166 * mailbox completion queue.
7167 **/
7168bool
7169lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7170{
7171
7172 uint32_t eqidx;
7173 struct lpfc_queue *fpeq = NULL;
7174 struct lpfc_eqe *eqe;
7175 bool mbox_pending;
7176
7177 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7178 return false;
7179
7180 /* Find the eq associated with the mcq */
7181
7182 if (phba->sli4_hba.hba_eq)
James Smart895427b2017-02-12 13:52:30 -08007183 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
James Smarte8d3c3b2013-10-10 12:21:30 -04007184 if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
7185 phba->sli4_hba.mbx_cq->assoc_qid) {
7186 fpeq = phba->sli4_hba.hba_eq[eqidx];
7187 break;
7188 }
7189 if (!fpeq)
7190 return false;
7191
7192 /* Turn off interrupts from this EQ */
7193
7194 lpfc_sli4_eq_clr_intr(fpeq);
7195
7196 /* Check to see if a mbox completion is pending */
7197
7198 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7199
7200 /*
7201 * If a mbox completion is pending, process all the events on EQ
7202 * associated with the mbox completion queue (this could include
7203 * mailbox commands, async events, els commands, receive queue data
7204 * and fcp commands)
7205 */
7206
7207 if (mbox_pending)
7208 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7209 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7210 fpeq->EQ_processed++;
7211 }
7212
7213 /* Always clear and re-arm the EQ */
7214
7215 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7216
7217 return mbox_pending;
7218
7219}
James Smarte59058c2008-08-24 21:49:00 -04007220
7221/**
James Smart3621a712009-04-06 18:47:14 -04007222 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
James Smarte59058c2008-08-24 21:49:00 -04007223 * @phba: Pointer to HBA context object.
7224 *
7225 * This function is called from the worker thread when a mailbox command times out.
7226 * The caller is not required to hold any locks. This function will reset the
7227 * HBA and recover all the pending commands.
7228 **/
dea31012005-04-17 16:05:31 -05007229void
7230lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7231{
James Smart2e0fef82007-06-17 19:56:36 -05007232 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
James Smarteb016562014-09-03 12:58:06 -04007233 MAILBOX_t *mb = NULL;
7234
James Smart1dcb58e2007-04-25 09:51:30 -04007235 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05007236
James Smarte8d3c3b2013-10-10 12:21:30 -04007237 /* If the mailbox completed, process the completion and return */
7238 if (lpfc_sli4_process_missed_mbox_completions(phba))
7239 return;
7240
James Smarteb016562014-09-03 12:58:06 -04007241 if (pmbox != NULL)
7242 mb = &pmbox->u.mb;
James Smarta257bf92009-04-06 18:48:10 -04007243 /* Check the pmbox pointer first. There is a race condition
7244 * between the mbox timeout handler getting executed in the
7245 * worklist and the mailbox actually completing. When this
7246 * race condition occurs, the mbox_active will be NULL.
7247 */
7248 spin_lock_irq(&phba->hbalock);
7249 if (pmbox == NULL) {
7250 lpfc_printf_log(phba, KERN_WARNING,
7251 LOG_MBOX | LOG_SLI,
7252 "0353 Active Mailbox cleared - mailbox timeout "
7253 "exiting\n");
7254 spin_unlock_irq(&phba->hbalock);
7255 return;
7256 }
7257
dea31012005-04-17 16:05:31 -05007258 /* Mbox cmd <mbxCommand> timeout */
James Smarted957682007-06-17 19:56:37 -05007259 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04007260 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
James Smart92d7f7b2007-06-17 19:56:38 -05007261 mb->mbxCommand,
7262 phba->pport->port_state,
7263 phba->sli.sli_flag,
7264 phba->sli.mbox_active);
James Smarta257bf92009-04-06 18:48:10 -04007265 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05007266
James Smart1dcb58e2007-04-25 09:51:30 -04007267 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7268 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007269 * it to fail all outstanding SCSI IO.
James Smart1dcb58e2007-04-25 09:51:30 -04007270 */
James Smart2e0fef82007-06-17 19:56:36 -05007271 spin_lock_irq(&phba->pport->work_port_lock);
7272 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7273 spin_unlock_irq(&phba->pport->work_port_lock);
7274 spin_lock_irq(&phba->hbalock);
7275 phba->link_state = LPFC_LINK_UNKNOWN;
James Smartf4b4c682009-05-22 14:53:12 -04007276 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05007277 spin_unlock_irq(&phba->hbalock);
James Smart1dcb58e2007-04-25 09:51:30 -04007278
James Smartdb55fba2014-04-04 13:52:02 -04007279 lpfc_sli_abort_fcp_rings(phba);
James Smart1dcb58e2007-04-25 09:51:30 -04007280
7281 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smart76bb24e2007-10-27 13:38:00 -04007282 "0345 Resetting board due to mailbox timeout\n");
James Smart3772a992009-05-22 14:50:54 -04007283
7284 /* Reset the HBA device */
7285 lpfc_reset_hba(phba);
dea31012005-04-17 16:05:31 -05007286}
7287
James Smarte59058c2008-08-24 21:49:00 -04007288/**
James Smart3772a992009-05-22 14:50:54 -04007289 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
James Smarte59058c2008-08-24 21:49:00 -04007290 * @phba: Pointer to HBA context object.
7291 * @pmbox: Pointer to mailbox object.
7292 * @flag: Flag indicating how the mailbox need to be processed.
7293 *
7294 * This function is called by discovery code and HBA management code
James Smart3772a992009-05-22 14:50:54 -04007295 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7296 * function gets the hbalock to protect the data structures.
James Smarte59058c2008-08-24 21:49:00 -04007297 * The mailbox command can be submitted in polling mode, in which case
7298 * this function will wait in a polling loop for the completion of the
7299 * mailbox.
7300 * If the mailbox is submitted in no_wait mode (not polling) the
7301 * function will submit the command and returns immediately without waiting
7302 * for the mailbox completion. The no_wait is supported only when HBA
7303 * is in SLI2/SLI3 mode - interrupts are enabled.
7304 * The SLI interface allows only one mailbox pending at a time. If the
7305 * mailbox is issued in polling mode and there is already a mailbox
7306 * pending, then the function will return an error. If the mailbox is issued
7307 * in NO_WAIT mode and there is a mailbox pending already, the function
7308 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7309 * The sli layer owns the mailbox object until the completion of mailbox
7310 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
7311 * return codes the caller owns the mailbox command after the return of
7312 * the function.
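 *
 * A minimal polling-mode caller might look like the sketch below
 * (illustrative only; the command-build step is elided and real callers,
 * such as lpfc_sli4_hba_setup(), add command-specific error handling):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	... build the mailbox command in pmb ...
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		rc = -EIO;
 *	mempool_free(pmb, phba->mbox_mem_pool);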
7313 **/
James Smart3772a992009-05-22 14:50:54 -04007314static int
7315lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7316 uint32_t flag)
dea31012005-04-17 16:05:31 -05007317{
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007318 MAILBOX_t *mbx;
James Smart2e0fef82007-06-17 19:56:36 -05007319 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05007320 uint32_t status, evtctr;
James Smart9940b972011-03-11 16:06:12 -05007321 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -05007322 int i;
James Smart09372822008-01-11 01:52:54 -05007323 unsigned long timeout;
dea31012005-04-17 16:05:31 -05007324 unsigned long drvr_flag = 0;
James Smart34b02dc2008-08-24 21:49:55 -04007325 uint32_t word0, ldata;
dea31012005-04-17 16:05:31 -05007326 void __iomem *to_slim;
James Smart58da1ff2008-04-07 10:15:56 -04007327 int processing_queue = 0;
7328
7329 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7330 if (!pmbox) {
James Smart8568a4d2009-07-19 10:01:16 -04007331 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart58da1ff2008-04-07 10:15:56 -04007332 /* processing mbox queue from intr_handler */
James Smart3772a992009-05-22 14:50:54 -04007333 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7334 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7335 return MBX_SUCCESS;
7336 }
James Smart58da1ff2008-04-07 10:15:56 -04007337 processing_queue = 1;
James Smart58da1ff2008-04-07 10:15:56 -04007338 pmbox = lpfc_mbox_get(phba);
7339 if (!pmbox) {
7340 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7341 return MBX_SUCCESS;
7342 }
7343 }
dea31012005-04-17 16:05:31 -05007344
James Smarted957682007-06-17 19:56:37 -05007345 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
James Smart92d7f7b2007-06-17 19:56:38 -05007346 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
James Smarted957682007-06-17 19:56:37 -05007347 if(!pmbox->vport) {
James Smart58da1ff2008-04-07 10:15:56 -04007348 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
James Smarted957682007-06-17 19:56:37 -05007349 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05007350 LOG_MBOX | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04007351 "1806 Mbox x%x failed. No vport\n",
James Smart3772a992009-05-22 14:50:54 -04007352 pmbox->u.mb.mbxCommand);
James Smarted957682007-06-17 19:56:37 -05007353 dump_stack();
James Smart58da1ff2008-04-07 10:15:56 -04007354 goto out_not_finished;
James Smarted957682007-06-17 19:56:37 -05007355 }
7356 }
7357
Linas Vepstas8d63f372007-02-14 14:28:36 -06007358 /* If the PCI channel is in offline state, do not post mbox. */
James Smart58da1ff2008-04-07 10:15:56 -04007359 if (unlikely(pci_channel_offline(phba->pcidev))) {
7360 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7361 goto out_not_finished;
7362 }
Linas Vepstas8d63f372007-02-14 14:28:36 -06007363
James Smarta257bf92009-04-06 18:48:10 -04007364 /* If HBA has a deferred error attention, fail the iocb. */
7365 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7366 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7367 goto out_not_finished;
7368 }
7369
dea31012005-04-17 16:05:31 -05007370 psli = &phba->sli;
James Smart92d7f7b2007-06-17 19:56:38 -05007371
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007372 mbx = &pmbox->u.mb;
dea31012005-04-17 16:05:31 -05007373 status = MBX_SUCCESS;
7374
James Smart2e0fef82007-06-17 19:56:36 -05007375 if (phba->link_state == LPFC_HBA_ERROR) {
7376 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05007377
7378 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007379 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7380 "(%d):0311 Mailbox command x%x cannot "
7381 "issue Data: x%x x%x\n",
7382 pmbox->vport ? pmbox->vport->vpi : 0,
7383 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007384 goto out_not_finished;
Jamie Wellnitz41415862006-02-28 19:25:27 -05007385 }
7386
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007387 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
James Smart9940b972011-03-11 16:06:12 -05007388 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7389 !(hc_copy & HC_MBINT_ENA)) {
7390 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7391 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smart3772a992009-05-22 14:50:54 -04007392 "(%d):2528 Mailbox command x%x cannot "
7393 "issue Data: x%x x%x\n",
7394 pmbox->vport ? pmbox->vport->vpi : 0,
7395 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart9940b972011-03-11 16:06:12 -05007396 goto out_not_finished;
7397 }
James Smart92908312006-03-07 15:04:13 -05007398 }
7399
dea31012005-04-17 16:05:31 -05007400 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7401 /* Polling for a mbox command when another one is already active
7402 * is not allowed in SLI. Also, the driver must have established
7403 * SLI2 mode to queue and process multiple mbox commands.
7404 */
7405
7406 if (flag & MBX_POLL) {
James Smart2e0fef82007-06-17 19:56:36 -05007407 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007408
7409 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007410 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7411 "(%d):2529 Mailbox command x%x "
7412 "cannot issue Data: x%x x%x\n",
7413 pmbox->vport ? pmbox->vport->vpi : 0,
7414 pmbox->u.mb.mbxCommand,
7415 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007416 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007417 }
7418
James Smart3772a992009-05-22 14:50:54 -04007419 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
James Smart2e0fef82007-06-17 19:56:36 -05007420 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007421 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007422 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7423 "(%d):2530 Mailbox command x%x "
7424 "cannot issue Data: x%x x%x\n",
7425 pmbox->vport ? pmbox->vport->vpi : 0,
7426 pmbox->u.mb.mbxCommand,
7427 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007428 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007429 }
7430
dea31012005-04-17 16:05:31 -05007431 /* Another mailbox command is still being processed, queue this
7432 * command to be processed later.
7433 */
7434 lpfc_mbox_put(phba, pmbox);
7435
7436 /* Mbox cmd issue - BUSY */
James Smarted957682007-06-17 19:56:37 -05007437 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04007438 "(%d):0308 Mbox cmd issue - BUSY Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05007439 "x%x x%x x%x x%x\n",
James Smart92d7f7b2007-06-17 19:56:38 -05007440 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007441 mbx->mbxCommand, phba->pport->port_state,
James Smart92d7f7b2007-06-17 19:56:38 -05007442 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05007443
7444 psli->slistat.mbox_busy++;
James Smart2e0fef82007-06-17 19:56:36 -05007445 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007446
James Smart858c9f62007-06-17 19:56:39 -05007447 if (pmbox->vport) {
7448 lpfc_debugfs_disc_trc(pmbox->vport,
7449 LPFC_DISC_TRC_MBOX_VPORT,
7450 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007451 (uint32_t)mbx->mbxCommand,
7452 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007453 }
7454 else {
7455 lpfc_debugfs_disc_trc(phba->pport,
7456 LPFC_DISC_TRC_MBOX,
7457 "MBOX Bsy: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007458 (uint32_t)mbx->mbxCommand,
7459 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007460 }
7461
James Smart2e0fef82007-06-17 19:56:36 -05007462 return MBX_BUSY;
dea31012005-04-17 16:05:31 -05007463 }
7464
dea31012005-04-17 16:05:31 -05007465 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7466
7467 /* If we are not polling, we MUST be in SLI2 mode */
7468 if (flag != MBX_POLL) {
James Smart3772a992009-05-22 14:50:54 -04007469 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007470 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea31012005-04-17 16:05:31 -05007471 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05007472 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007473 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007474 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7475 "(%d):2531 Mailbox command x%x "
7476 "cannot issue Data: x%x x%x\n",
7477 pmbox->vport ? pmbox->vport->vpi : 0,
7478 pmbox->u.mb.mbxCommand,
7479 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007480 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007481 }
7482 /* timeout active mbox command */
James Smart256ec0d2013-04-17 20:14:58 -04007483 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7484 1000);
7485 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea31012005-04-17 16:05:31 -05007486 }
7487
7488 /* Mailbox cmd <cmd> issue */
James Smarted957682007-06-17 19:56:37 -05007489 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04007490 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05007491 "x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04007492 pmbox->vport ? pmbox->vport->vpi : 0,
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007493 mbx->mbxCommand, phba->pport->port_state,
James Smart92d7f7b2007-06-17 19:56:38 -05007494 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05007495
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007496 if (mbx->mbxCommand != MBX_HEARTBEAT) {
James Smart858c9f62007-06-17 19:56:39 -05007497 if (pmbox->vport) {
7498 lpfc_debugfs_disc_trc(pmbox->vport,
7499 LPFC_DISC_TRC_MBOX_VPORT,
7500 "MBOX Send vport: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007501 (uint32_t)mbx->mbxCommand,
7502 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007503 }
7504 else {
7505 lpfc_debugfs_disc_trc(phba->pport,
7506 LPFC_DISC_TRC_MBOX,
7507 "MBOX Send: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007508 (uint32_t)mbx->mbxCommand,
7509 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007510 }
7511 }
7512
dea31012005-04-17 16:05:31 -05007513 psli->slistat.mbox_cmd++;
7514 evtctr = psli->slistat.mbox_event;
7515
7516 /* next set own bit for the adapter and copy over command word */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007517 mbx->mbxOwner = OWN_CHIP;
dea31012005-04-17 16:05:31 -05007518
James Smart3772a992009-05-22 14:50:54 -04007519 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
James Smart7a470272010-03-15 11:25:20 -04007520 /* Populate mbox extension offset word. */
7521 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007522 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
James Smart7a470272010-03-15 11:25:20 -04007523 = (uint8_t *)phba->mbox_ext
7524 - (uint8_t *)phba->mbox;
7525 }
7526
7527 /* Copy the mailbox extension data */
7528 if (pmbox->in_ext_byte_len && pmbox->context2) {
7529 lpfc_sli_pcimem_bcopy(pmbox->context2,
7530 (uint8_t *)phba->mbox_ext,
7531 pmbox->in_ext_byte_len);
7532 }
7533 /* Copy command data to host SLIM area */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007534 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05007535 } else {
James Smart7a470272010-03-15 11:25:20 -04007536 /* Populate mbox extension offset word. */
7537 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007538 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
James Smart7a470272010-03-15 11:25:20 -04007539 = MAILBOX_HBA_EXT_OFFSET;
7540
7541 /* Copy the mailbox extension data */
James Smart895427b2017-02-12 13:52:30 -08007542 if (pmbox->in_ext_byte_len && pmbox->context2)
James Smart7a470272010-03-15 11:25:20 -04007543 lpfc_memcpy_to_slim(phba->MBslimaddr +
7544 MAILBOX_HBA_EXT_OFFSET,
7545 pmbox->context2, pmbox->in_ext_byte_len);
7546
James Smart895427b2017-02-12 13:52:30 -08007547 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea31012005-04-17 16:05:31 -05007548 /* copy command data into host mbox for cmpl */
James Smart895427b2017-02-12 13:52:30 -08007549 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7550 MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05007551
7552 /* First copy mbox command data to HBA SLIM, skip past first
7553 word */
7554 to_slim = phba->MBslimaddr + sizeof (uint32_t);
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007555 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea31012005-04-17 16:05:31 -05007556 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7557
7558 /* Next copy over first word, with mbxOwner set */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007559 ldata = *((uint32_t *)mbx);
dea31012005-04-17 16:05:31 -05007560 to_slim = phba->MBslimaddr;
7561 writel(ldata, to_slim);
7562 readl(to_slim); /* flush */
7563
James Smart895427b2017-02-12 13:52:30 -08007564 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea31012005-04-17 16:05:31 -05007565 /* switch over to host mailbox */
James Smart3772a992009-05-22 14:50:54 -04007566 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05007567 }
7568
7569 wmb();
dea31012005-04-17 16:05:31 -05007570
7571 switch (flag) {
7572 case MBX_NOWAIT:
James Smart09372822008-01-11 01:52:54 -05007573 /* Set up reference to mailbox command */
dea31012005-04-17 16:05:31 -05007574 psli->mbox_active = pmbox;
James Smart09372822008-01-11 01:52:54 -05007575 /* Interrupt board to do it */
7576 writel(CA_MBATT, phba->CAregaddr);
7577 readl(phba->CAregaddr); /* flush */
7578 /* Don't wait for it to finish, just return */
dea31012005-04-17 16:05:31 -05007579 break;
7580
7581 case MBX_POLL:
James Smart09372822008-01-11 01:52:54 -05007582 /* Set up null reference to mailbox command */
dea31012005-04-17 16:05:31 -05007583 psli->mbox_active = NULL;
James Smart09372822008-01-11 01:52:54 -05007584 /* Interrupt board to do it */
7585 writel(CA_MBATT, phba->CAregaddr);
7586 readl(phba->CAregaddr); /* flush */
7587
James Smart3772a992009-05-22 14:50:54 -04007588 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05007589 /* First read mbox status word */
James Smart34b02dc2008-08-24 21:49:55 -04007590 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05007591 word0 = le32_to_cpu(word0);
7592 } else {
7593 /* First read mbox status word */
James Smart9940b972011-03-11 16:06:12 -05007594 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7595 spin_unlock_irqrestore(&phba->hbalock,
7596 drvr_flag);
7597 goto out_not_finished;
7598 }
dea31012005-04-17 16:05:31 -05007599 }
7600
7601 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05007602 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7603 spin_unlock_irqrestore(&phba->hbalock,
7604 drvr_flag);
7605 goto out_not_finished;
7606 }
James Smarta183a152011-10-10 21:32:43 -04007607 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7608 1000) + jiffies;
James Smart09372822008-01-11 01:52:54 -05007609 i = 0;
dea31012005-04-17 16:05:31 -05007610 /* Wait for command to complete */
Jamie Wellnitz41415862006-02-28 19:25:27 -05007611 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7612 (!(ha_copy & HA_MBATT) &&
James Smart2e0fef82007-06-17 19:56:36 -05007613 (phba->link_state > LPFC_WARM_START))) {
James Smart09372822008-01-11 01:52:54 -05007614 if (time_after(jiffies, timeout)) {
dea31012005-04-17 16:05:31 -05007615 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05007616 spin_unlock_irqrestore(&phba->hbalock,
dea31012005-04-17 16:05:31 -05007617 drvr_flag);
James Smart58da1ff2008-04-07 10:15:56 -04007618 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007619 }
7620
7621 /* Check if we took a mbox interrupt while we were
7622 polling */
7623 if (((word0 & OWN_CHIP) != OWN_CHIP)
7624 && (evtctr != psli->slistat.mbox_event))
7625 break;
7626
James Smart09372822008-01-11 01:52:54 -05007627 if (i++ > 10) {
7628 spin_unlock_irqrestore(&phba->hbalock,
7629 drvr_flag);
7630 msleep(1);
7631 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7632 }
dea31012005-04-17 16:05:31 -05007633
James Smart3772a992009-05-22 14:50:54 -04007634 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05007635 /* First copy command data */
James Smart34b02dc2008-08-24 21:49:55 -04007636 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05007637 word0 = le32_to_cpu(word0);
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007638 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea31012005-04-17 16:05:31 -05007639 MAILBOX_t *slimmb;
James Smart34b02dc2008-08-24 21:49:55 -04007640 uint32_t slimword0;
dea31012005-04-17 16:05:31 -05007641 /* Check real SLIM for any errors */
7642 slimword0 = readl(phba->MBslimaddr);
7643 slimmb = (MAILBOX_t *) & slimword0;
7644 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7645 && slimmb->mbxStatus) {
7646 psli->sli_flag &=
James Smart3772a992009-05-22 14:50:54 -04007647 ~LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05007648 word0 = slimword0;
7649 }
7650 }
7651 } else {
7652 /* First copy command data */
7653 word0 = readl(phba->MBslimaddr);
7654 }
7655 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05007656 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7657 spin_unlock_irqrestore(&phba->hbalock,
7658 drvr_flag);
7659 goto out_not_finished;
7660 }
dea31012005-04-17 16:05:31 -05007661 }
7662
James Smart3772a992009-05-22 14:50:54 -04007663 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05007664 /* copy results back to user */
James Smart2ea259e2017-02-12 13:52:27 -08007665 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7666 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04007667 /* Copy the mailbox extension data */
7668 if (pmbox->out_ext_byte_len && pmbox->context2) {
7669 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7670 pmbox->context2,
7671 pmbox->out_ext_byte_len);
7672 }
dea31012005-04-17 16:05:31 -05007673 } else {
7674 /* First copy command data */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007675 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
James Smart2ea259e2017-02-12 13:52:27 -08007676 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04007677 /* Copy the mailbox extension data */
7678 if (pmbox->out_ext_byte_len && pmbox->context2) {
7679 lpfc_memcpy_from_slim(pmbox->context2,
7680 phba->MBslimaddr +
7681 MAILBOX_HBA_EXT_OFFSET,
7682 pmbox->out_ext_byte_len);
dea31012005-04-17 16:05:31 -05007683 }
7684 }
7685
7686 writel(HA_MBATT, phba->HAregaddr);
7687 readl(phba->HAregaddr); /* flush */
7688
7689 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007690 status = mbx->mbxStatus;
dea31012005-04-17 16:05:31 -05007691 }
7692
James Smart2e0fef82007-06-17 19:56:36 -05007693 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7694 return status;
James Smart58da1ff2008-04-07 10:15:56 -04007695
7696out_not_finished:
7697 if (processing_queue) {
James Smartda0436e2009-05-22 14:51:39 -04007698 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
James Smart58da1ff2008-04-07 10:15:56 -04007699 lpfc_mbox_cmpl_put(phba, pmbox);
7700 }
7701 return MBX_NOT_FINISHED;
dea31012005-04-17 16:05:31 -05007702}
7703
James Smarte59058c2008-08-24 21:49:00 -04007704/**
James Smartf1126682009-06-10 17:22:44 -04007705 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7706 * @phba: Pointer to HBA context object.
7707 *
7708 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7709 * the driver internal pending mailbox queue. It will then try to wait out the
7710 * possible outstanding mailbox command before returning.
7711 *
7712 * Returns:
7713 * 0 - the outstanding mailbox command completed; otherwise, the wait for
7714 * the outstanding mailbox command timed out.
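 *
 * A caller would typically pair this with lpfc_sli4_async_mbox_unblock()
 * around work that must not race with asynchronous mailbox posting
 * (illustrative sketch; the operation in the middle is hypothetical):
 *
 *	if (lpfc_sli4_async_mbox_block(phba))
 *		return -EIO;
 *	... perform the operation ...
 *	lpfc_sli4_async_mbox_unblock(phba);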
7715 **/
7716static int
7717lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7718{
7719 struct lpfc_sli *psli = &phba->sli;
James Smartf1126682009-06-10 17:22:44 -04007720 int rc = 0;
James Smarta183a152011-10-10 21:32:43 -04007721 unsigned long timeout = 0;
James Smartf1126682009-06-10 17:22:44 -04007722
7723 /* Mark the asynchronous mailbox command posting as blocked */
7724 spin_lock_irq(&phba->hbalock);
7725 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
James Smartf1126682009-06-10 17:22:44 -04007726 /* Determine how long we might wait for the active mailbox
7727 * command to be gracefully completed by firmware.
7728 */
James Smarta183a152011-10-10 21:32:43 -04007729 if (phba->sli.mbox_active)
7730 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7731 phba->sli.mbox_active) *
7732 1000) + jiffies;
7733 spin_unlock_irq(&phba->hbalock);
7734
James Smarte8d3c3b2013-10-10 12:21:30 -04007735 /* Make sure the mailbox is really active */
7736 if (timeout)
7737 lpfc_sli4_process_missed_mbox_completions(phba);
7738
James Smartf1126682009-06-10 17:22:44 -04007739	/* Wait for the outstanding mailbox command to complete */
7740 while (phba->sli.mbox_active) {
7741 /* Check active mailbox complete status every 2ms */
7742 msleep(2);
7743 if (time_after(jiffies, timeout)) {
 7744			/* Timed out; mark the outstanding cmd as not complete */
7745 rc = 1;
7746 break;
7747 }
7748 }
7749
 7750	/* Cannot cleanly block async mailbox command posting, fail it */
7751 if (rc) {
7752 spin_lock_irq(&phba->hbalock);
7753 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7754 spin_unlock_irq(&phba->hbalock);
7755 }
7756 return rc;
7757}
7758
7759/**
 7760 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7761 * @phba: Pointer to HBA context object.
7762 *
 7763 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7764 * commands from the driver internal pending mailbox queue. It makes sure
7765 * that there is no outstanding mailbox command before resuming posting
 7766 * asynchronous mailbox commands. If, for any reason, there is an outstanding
7767 * mailbox command, it will try to wait it out before resuming asynchronous
7768 * mailbox command posting.
7769 **/
7770static void
7771lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7772{
7773 struct lpfc_sli *psli = &phba->sli;
7774
7775 spin_lock_irq(&phba->hbalock);
7776 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7777 /* Asynchronous mailbox posting is not blocked, do nothing */
7778 spin_unlock_irq(&phba->hbalock);
7779 return;
7780 }
7781
 7782	/* The outstanding synchronous mailbox command is guaranteed to be done,
 7783	 * whether it completed successfully or timed out; on a timeout the
 7784	 * outstanding command is always removed, so just unblock async mailbox
 7785	 * command posting and resume
7786 */
7787 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7788 spin_unlock_irq(&phba->hbalock);
7789
 7790	/* wake up worker thread to post asynchronous mailbox command */
7791 lpfc_worker_wake_up(phba);
7792}
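
/*
 * Illustrative sketch, not part of the driver: the intended pairing of the two
 * helpers above.  Async mailbox posting is blocked for the duration of some
 * synchronous/polled mailbox work and then unblocked; if blocking times out,
 * lpfc_sli4_async_mbox_block() has already cleared the block flag on its own
 * failure path.  The function name is hypothetical.
 */
static int __maybe_unused
lpfc_example_async_mbox_quiesce(struct lpfc_hba *phba)
{
	/* Stop the worker thread from posting further async mailbox cmds */
	if (lpfc_sli4_async_mbox_block(phba))
		return MBXERR_ERROR;

	/*
	 * ... issue the synchronous/polled mailbox work here; the bootstrap
	 * mailbox region can no longer be raced by async postings ...
	 */

	/* Resume async posting and wake the worker thread */
	lpfc_sli4_async_mbox_unblock(phba);
	return MBX_SUCCESS;
}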
7793
7794/**
James Smart2d843ed2012-09-29 11:29:06 -04007795 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7796 * @phba: Pointer to HBA context object.
7797 * @mboxq: Pointer to mailbox object.
7798 *
7799 * The function waits for the bootstrap mailbox register ready bit from
 7800 * the port for twice the regular mailbox command timeout value.
7801 *
7802 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7803 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7804 **/
7805static int
7806lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7807{
7808 uint32_t db_ready;
7809 unsigned long timeout;
7810 struct lpfc_register bmbx_reg;
7811
7812 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7813 * 1000) + jiffies;
7814
7815 do {
7816 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7817 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7818 if (!db_ready)
7819 msleep(2);
7820
7821 if (time_after(jiffies, timeout))
7822 return MBXERR_ERROR;
7823 } while (!db_ready);
7824
7825 return 0;
7826}
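
/*
 * Illustrative sketch, not part of the driver: the same deadline idiom used by
 * lpfc_sli4_wait_bmbx_ready() above, generalized to a caller-supplied register
 * and ready mask.  The helper name and parameters are hypothetical.
 */
static int __maybe_unused
lpfc_example_poll_reg_ready(void __iomem *regaddr, uint32_t ready_mask,
			    unsigned int tmo_ms)
{
	unsigned long timeout = msecs_to_jiffies(tmo_ms) + jiffies;

	/* Re-sample every 2ms until the ready bit sets or the deadline hits */
	while (!(readl(regaddr) & ready_mask)) {
		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
		msleep(2);
	}
	return 0;
}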
7827
7828/**
James Smartda0436e2009-05-22 14:51:39 -04007829 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7830 * @phba: Pointer to HBA context object.
7831 * @mboxq: Pointer to mailbox object.
7832 *
7833 * The function posts a mailbox to the port. The mailbox is expected
 7834 * to be completely filled in and ready for the port to operate on it.
7835 * This routine executes a synchronous completion operation on the
7836 * mailbox by polling for its completion.
7837 *
7838 * The caller must not be holding any locks when calling this routine.
7839 *
7840 * Returns:
7841 * MBX_SUCCESS - mailbox posted successfully
7842 * Any of the MBX error values.
7843 **/
7844static int
7845lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7846{
7847 int rc = MBX_SUCCESS;
7848 unsigned long iflag;
James Smartda0436e2009-05-22 14:51:39 -04007849 uint32_t mcqe_status;
7850 uint32_t mbx_cmnd;
James Smartda0436e2009-05-22 14:51:39 -04007851 struct lpfc_sli *psli = &phba->sli;
7852 struct lpfc_mqe *mb = &mboxq->u.mqe;
7853 struct lpfc_bmbx_create *mbox_rgn;
7854 struct dma_address *dma_address;
James Smartda0436e2009-05-22 14:51:39 -04007855
7856 /*
7857 * Only one mailbox can be active to the bootstrap mailbox region
7858 * at a time and there is no queueing provided.
7859 */
7860 spin_lock_irqsave(&phba->hbalock, iflag);
7861 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7862 spin_unlock_irqrestore(&phba->hbalock, iflag);
7863 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007864 "(%d):2532 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04007865 "cannot issue Data: x%x x%x\n",
7866 mboxq->vport ? mboxq->vport->vpi : 0,
7867 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007868 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7869 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007870 psli->sli_flag, MBX_POLL);
7871 return MBXERR_ERROR;
7872 }
7873 /* The server grabs the token and owns it until release */
7874 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7875 phba->sli.mbox_active = mboxq;
7876 spin_unlock_irqrestore(&phba->hbalock, iflag);
7877
James Smart2d843ed2012-09-29 11:29:06 -04007878	/* wait for bootstrap mbox register readiness */
7879 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7880 if (rc)
7881 goto exit;
7882
James Smartda0436e2009-05-22 14:51:39 -04007883 /*
7884 * Initialize the bootstrap memory region to avoid stale data areas
7885 * in the mailbox post. Then copy the caller's mailbox contents to
7886 * the bmbx mailbox region.
7887 */
7888 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7889 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7890 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7891 sizeof(struct lpfc_mqe));
7892
7893 /* Post the high mailbox dma address to the port and wait for ready. */
7894 dma_address = &phba->sli4_hba.bmbx.dma_address;
7895 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7896
James Smart2d843ed2012-09-29 11:29:06 -04007897	/* wait for the bootstrap mbox hi-address write to complete */
7898 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7899 if (rc)
7900 goto exit;
James Smartda0436e2009-05-22 14:51:39 -04007901
7902 /* Post the low mailbox dma address to the port. */
7903 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
James Smartda0436e2009-05-22 14:51:39 -04007904
James Smart2d843ed2012-09-29 11:29:06 -04007905	/* wait for the bootstrap mbox low-address write to complete */
7906 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7907 if (rc)
7908 goto exit;
James Smartda0436e2009-05-22 14:51:39 -04007909
7910 /*
7911 * Read the CQ to ensure the mailbox has completed.
7912 * If so, update the mailbox status so that the upper layers
7913 * can complete the request normally.
7914 */
7915 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7916 sizeof(struct lpfc_mqe));
7917 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7918 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7919 sizeof(struct lpfc_mcqe));
7920 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
James Smart05580562011-05-24 11:40:48 -04007921 /*
7922 * When the CQE status indicates a failure and the mailbox status
7923 * indicates success then copy the CQE status into the mailbox status
7924 * (and prefix it with x4000).
7925 */
James Smartda0436e2009-05-22 14:51:39 -04007926 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
James Smart05580562011-05-24 11:40:48 -04007927 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7928 bf_set(lpfc_mqe_status, mb,
7929 (LPFC_MBX_ERROR_RANGE | mcqe_status));
James Smartda0436e2009-05-22 14:51:39 -04007930 rc = MBXERR_ERROR;
James Smartd7c47992010-06-08 18:31:54 -04007931 } else
7932 lpfc_sli4_swap_str(phba, mboxq);
James Smartda0436e2009-05-22 14:51:39 -04007933
7934 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007935 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
James Smartda0436e2009-05-22 14:51:39 -04007936 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7937 " x%x x%x CQ: x%x x%x x%x x%x\n",
James Smarta183a152011-10-10 21:32:43 -04007938 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7939 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7940 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007941 bf_get(lpfc_mqe_status, mb),
7942 mb->un.mb_words[0], mb->un.mb_words[1],
7943 mb->un.mb_words[2], mb->un.mb_words[3],
7944 mb->un.mb_words[4], mb->un.mb_words[5],
7945 mb->un.mb_words[6], mb->un.mb_words[7],
7946 mb->un.mb_words[8], mb->un.mb_words[9],
7947 mb->un.mb_words[10], mb->un.mb_words[11],
7948 mb->un.mb_words[12], mboxq->mcqe.word0,
7949 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7950 mboxq->mcqe.trailer);
7951exit:
 7952	/* We are holding the token, no lock needed when releasing it */
7953 spin_lock_irqsave(&phba->hbalock, iflag);
7954 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7955 phba->sli.mbox_active = NULL;
7956 spin_unlock_irqrestore(&phba->hbalock, iflag);
7957 return rc;
7958}
7959
7960/**
7961 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7962 * @phba: Pointer to HBA context object.
 7963 * @mboxq: Pointer to mailbox object.
 7964 * @flag: Flag indicating how the mailbox needs to be processed.
7965 *
7966 * This function is called by discovery code and HBA management code to submit
7967 * a mailbox command to firmware with SLI-4 interface spec.
7968 *
 7969 * Return codes: the caller owns the mailbox command after the
 7970 * function returns.
7971 **/
7972static int
7973lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7974 uint32_t flag)
7975{
7976 struct lpfc_sli *psli = &phba->sli;
7977 unsigned long iflags;
7978 int rc;
7979
James Smartb76f2dc2011-07-22 18:37:42 -04007980 /* dump from issue mailbox command if setup */
7981 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7982
James Smart8fa38512009-07-19 10:01:03 -04007983 rc = lpfc_mbox_dev_check(phba);
7984 if (unlikely(rc)) {
7985 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007986 "(%d):2544 Mailbox command x%x (x%x/x%x) "
James Smart8fa38512009-07-19 10:01:03 -04007987 "cannot issue Data: x%x x%x\n",
7988 mboxq->vport ? mboxq->vport->vpi : 0,
7989 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007990 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7991 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smart8fa38512009-07-19 10:01:03 -04007992 psli->sli_flag, flag);
7993 goto out_not_finished;
7994 }
7995
James Smartda0436e2009-05-22 14:51:39 -04007996 /* Detect polling mode and jump to a handler */
7997 if (!phba->sli4_hba.intr_enable) {
7998 if (flag == MBX_POLL)
7999 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8000 else
8001 rc = -EIO;
8002 if (rc != MBX_SUCCESS)
James Smart05580562011-05-24 11:40:48 -04008003 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
James Smartda0436e2009-05-22 14:51:39 -04008004 "(%d):2541 Mailbox command x%x "
James Smartcc459f12012-05-09 21:18:30 -04008005 "(x%x/x%x) failure: "
8006 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8007 "Data: x%x x%x\n,",
James Smartda0436e2009-05-22 14:51:39 -04008008 mboxq->vport ? mboxq->vport->vpi : 0,
8009 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04008010 lpfc_sli_config_mbox_subsys_get(phba,
8011 mboxq),
8012 lpfc_sli_config_mbox_opcode_get(phba,
8013 mboxq),
James Smartcc459f12012-05-09 21:18:30 -04008014 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8015 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8016 bf_get(lpfc_mcqe_ext_status,
8017 &mboxq->mcqe),
James Smartda0436e2009-05-22 14:51:39 -04008018 psli->sli_flag, flag);
8019 return rc;
8020 } else if (flag == MBX_POLL) {
James Smartf1126682009-06-10 17:22:44 -04008021 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8022 "(%d):2542 Try to issue mailbox command "
James Smarta183a152011-10-10 21:32:43 -04008023				"x%x (x%x/x%x) synchronously ahead of async "
James Smartf1126682009-06-10 17:22:44 -04008024 "mailbox command queue: x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04008025 mboxq->vport ? mboxq->vport->vpi : 0,
8026 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04008027 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8028 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008029 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04008030 /* Try to block the asynchronous mailbox posting */
8031 rc = lpfc_sli4_async_mbox_block(phba);
8032 if (!rc) {
8033 /* Successfully blocked, now issue sync mbox cmd */
8034 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8035 if (rc != MBX_SUCCESS)
James Smartcc459f12012-05-09 21:18:30 -04008036 lpfc_printf_log(phba, KERN_WARNING,
James Smarta183a152011-10-10 21:32:43 -04008037 LOG_MBOX | LOG_SLI,
James Smartcc459f12012-05-09 21:18:30 -04008038 "(%d):2597 Sync Mailbox command "
8039 "x%x (x%x/x%x) failure: "
8040 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8041 "Data: x%x x%x\n,",
8042 mboxq->vport ? mboxq->vport->vpi : 0,
James Smarta183a152011-10-10 21:32:43 -04008043 mboxq->u.mb.mbxCommand,
8044 lpfc_sli_config_mbox_subsys_get(phba,
8045 mboxq),
8046 lpfc_sli_config_mbox_opcode_get(phba,
8047 mboxq),
James Smartcc459f12012-05-09 21:18:30 -04008048 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8049 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8050 bf_get(lpfc_mcqe_ext_status,
8051 &mboxq->mcqe),
James Smarta183a152011-10-10 21:32:43 -04008052 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04008053 /* Unblock the async mailbox posting afterward */
8054 lpfc_sli4_async_mbox_unblock(phba);
8055 }
8056 return rc;
James Smartda0436e2009-05-22 14:51:39 -04008057 }
8058
 8059	/* Now, interrupt mode asynchronous mailbox command */
8060 rc = lpfc_mbox_cmd_check(phba, mboxq);
8061 if (rc) {
8062 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04008063 "(%d):2543 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04008064 "cannot issue Data: x%x x%x\n",
8065 mboxq->vport ? mboxq->vport->vpi : 0,
8066 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04008067 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8068 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008069 psli->sli_flag, flag);
8070 goto out_not_finished;
8071 }
James Smartda0436e2009-05-22 14:51:39 -04008072
8073 /* Put the mailbox command to the driver internal FIFO */
8074 psli->slistat.mbox_busy++;
8075 spin_lock_irqsave(&phba->hbalock, iflags);
8076 lpfc_mbox_put(phba, mboxq);
8077 spin_unlock_irqrestore(&phba->hbalock, iflags);
8078 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8079 "(%d):0354 Mbox cmd issue - Enqueue Data: "
James Smarta183a152011-10-10 21:32:43 -04008080 "x%x (x%x/x%x) x%x x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04008081 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8082 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
James Smarta183a152011-10-10 21:32:43 -04008083 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8084 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008085 phba->pport->port_state,
8086 psli->sli_flag, MBX_NOWAIT);
8087 /* Wake up worker thread to transport mailbox command from head */
8088 lpfc_worker_wake_up(phba);
8089
8090 return MBX_BUSY;
8091
8092out_not_finished:
8093 return MBX_NOT_FINISHED;
8094}
8095
8096/**
8097 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8098 * @phba: Pointer to HBA context object.
8099 *
 8100 * This function is called by the worker thread to send a mailbox command to
8101 * SLI4 HBA firmware.
8102 *
8103 **/
8104int
8105lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8106{
8107 struct lpfc_sli *psli = &phba->sli;
8108 LPFC_MBOXQ_t *mboxq;
8109 int rc = MBX_SUCCESS;
8110 unsigned long iflags;
8111 struct lpfc_mqe *mqe;
8112 uint32_t mbx_cmnd;
8113
 8114	/* Check interrupt mode before posting async mailbox command */
8115 if (unlikely(!phba->sli4_hba.intr_enable))
8116 return MBX_NOT_FINISHED;
8117
8118 /* Check for mailbox command service token */
8119 spin_lock_irqsave(&phba->hbalock, iflags);
8120 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8121 spin_unlock_irqrestore(&phba->hbalock, iflags);
8122 return MBX_NOT_FINISHED;
8123 }
8124 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8125 spin_unlock_irqrestore(&phba->hbalock, iflags);
8126 return MBX_NOT_FINISHED;
8127 }
8128 if (unlikely(phba->sli.mbox_active)) {
8129 spin_unlock_irqrestore(&phba->hbalock, iflags);
8130 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8131 "0384 There is pending active mailbox cmd\n");
8132 return MBX_NOT_FINISHED;
8133 }
8134 /* Take the mailbox command service token */
8135 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8136
8137 /* Get the next mailbox command from head of queue */
8138 mboxq = lpfc_mbox_get(phba);
8139
 8140	/* If no more mailbox commands are waiting to be posted, we're done */
8141 if (!mboxq) {
8142 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8143 spin_unlock_irqrestore(&phba->hbalock, iflags);
8144 return MBX_SUCCESS;
8145 }
8146 phba->sli.mbox_active = mboxq;
8147 spin_unlock_irqrestore(&phba->hbalock, iflags);
8148
8149 /* Check device readiness for posting mailbox command */
8150 rc = lpfc_mbox_dev_check(phba);
8151 if (unlikely(rc))
8152 /* Driver clean routine will clean up pending mailbox */
8153 goto out_not_finished;
8154
8155 /* Prepare the mbox command to be posted */
8156 mqe = &mboxq->u.mqe;
8157 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8158
8159 /* Start timer for the mbox_tmo and log some mailbox post messages */
8160 mod_timer(&psli->mbox_tmo, (jiffies +
James Smart256ec0d2013-04-17 20:14:58 -04008161 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
James Smartda0436e2009-05-22 14:51:39 -04008162
8163 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04008164 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
James Smartda0436e2009-05-22 14:51:39 -04008165 "x%x x%x\n",
8166 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
James Smarta183a152011-10-10 21:32:43 -04008167 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8168 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008169 phba->pport->port_state, psli->sli_flag);
8170
8171 if (mbx_cmnd != MBX_HEARTBEAT) {
8172 if (mboxq->vport) {
8173 lpfc_debugfs_disc_trc(mboxq->vport,
8174 LPFC_DISC_TRC_MBOX_VPORT,
8175 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8176 mbx_cmnd, mqe->un.mb_words[0],
8177 mqe->un.mb_words[1]);
8178 } else {
8179 lpfc_debugfs_disc_trc(phba->pport,
8180 LPFC_DISC_TRC_MBOX,
8181 "MBOX Send: cmd:x%x mb:x%x x%x",
8182 mbx_cmnd, mqe->un.mb_words[0],
8183 mqe->un.mb_words[1]);
8184 }
8185 }
8186 psli->slistat.mbox_cmd++;
8187
8188 /* Post the mailbox command to the port */
8189 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8190 if (rc != MBX_SUCCESS) {
8191 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04008192 "(%d):2533 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04008193 "cannot issue Data: x%x x%x\n",
8194 mboxq->vport ? mboxq->vport->vpi : 0,
8195 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04008196 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8197 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008198 psli->sli_flag, MBX_NOWAIT);
8199 goto out_not_finished;
8200 }
8201
8202 return rc;
8203
8204out_not_finished:
8205 spin_lock_irqsave(&phba->hbalock, iflags);
James Smartd7069f02012-03-01 22:36:29 -05008206 if (phba->sli.mbox_active) {
8207 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8208 __lpfc_mbox_cmpl_put(phba, mboxq);
8209 /* Release the token */
8210 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8211 phba->sli.mbox_active = NULL;
8212 }
James Smartda0436e2009-05-22 14:51:39 -04008213 spin_unlock_irqrestore(&phba->hbalock, iflags);
8214
8215 return MBX_NOT_FINISHED;
8216}
8217
8218/**
8219 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8220 * @phba: Pointer to HBA context object.
8221 * @pmbox: Pointer to mailbox object.
 8222 * @flag: Flag indicating how the mailbox needs to be processed.
8223 *
 8224 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine,
 8225 * using the API jump table function pointer in the lpfc_hba struct.
8226 *
 8227 * Return codes: the caller owns the mailbox command after the
 8228 * function returns.
8229 **/
8230int
8231lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8232{
8233 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8234}
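
/*
 * Illustrative sketch, not part of the driver: a caller handing an
 * already-prepared mailbox command to the wrapper above, which dispatches to
 * the SLI3 or SLI4 issue routine selected in lpfc_mbox_api_table_setup().
 * The mempool_free()/mbox_mem_pool ownership handling follows the common
 * pattern seen elsewhere in the driver (an assumption for this sketch), and
 * the function name is hypothetical.
 */
static int __maybe_unused
lpfc_example_issue_prepared_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* MBX_NOWAIT: queue the command; completion runs via mboxq->mbox_cmpl */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* The command was never queued; the caller still owns it */
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}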
8235
8236/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008237 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
James Smartda0436e2009-05-22 14:51:39 -04008238 * @phba: The hba struct for which this call is being executed.
8239 * @dev_grp: The HBA PCI-Device group number.
8240 *
8241 * This routine sets up the mbox interface API function jump table in @phba
8242 * struct.
8243 * Returns: 0 - success, -ENODEV - failure.
8244 **/
8245int
8246lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8247{
8248
8249 switch (dev_grp) {
8250 case LPFC_PCI_DEV_LP:
8251 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8252 phba->lpfc_sli_handle_slow_ring_event =
8253 lpfc_sli_handle_slow_ring_event_s3;
8254 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8255 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8256 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8257 break;
8258 case LPFC_PCI_DEV_OC:
8259 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8260 phba->lpfc_sli_handle_slow_ring_event =
8261 lpfc_sli_handle_slow_ring_event_s4;
8262 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8263 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8264 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8265 break;
8266 default:
8267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8268 "1420 Invalid HBA PCI-device group: 0x%x\n",
8269 dev_grp);
8270 return -ENODEV;
8271 break;
8272 }
8273 return 0;
8274}
8275
8276/**
James Smart3621a712009-04-06 18:47:14 -04008277 * __lpfc_sli_ringtx_put - Add an iocb to the txq
James Smarte59058c2008-08-24 21:49:00 -04008278 * @phba: Pointer to HBA context object.
8279 * @pring: Pointer to driver SLI ring object.
8280 * @piocb: Pointer to address of newly added command iocb.
8281 *
8282 * This function is called with hbalock held to add a command
 8283 * iocb to the txq when the SLI layer cannot submit the command iocb
8284 * to the ring.
8285 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04008286void
James Smart92d7f7b2007-06-17 19:56:38 -05008287__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05008288 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05008289{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01008290 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008291 /* Insert the caller's iocb in the txq tail for later processing. */
8292 list_add_tail(&piocb->list, &pring->txq);
dea31012005-04-17 16:05:31 -05008293}
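
/*
 * Illustrative sketch, not part of the driver: __lpfc_sli_ringtx_put() asserts
 * that hbalock is held, so an outside caller takes the lock around it.  The
 * function name is hypothetical.
 */
static void __maybe_unused
lpfc_example_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	unsigned long iflags;

	/* Satisfy the lockdep assertion inside the helper */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_ringtx_put(phba, pring, piocb);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}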
8294
James Smarte59058c2008-08-24 21:49:00 -04008295/**
James Smart3621a712009-04-06 18:47:14 -04008296 * lpfc_sli_next_iocb - Get the next iocb in the txq
James Smarte59058c2008-08-24 21:49:00 -04008297 * @phba: Pointer to HBA context object.
8298 * @pring: Pointer to driver SLI ring object.
8299 * @piocb: Pointer to address of newly added command iocb.
8300 *
8301 * This function is called with hbalock held before a new
8302 * iocb is submitted to the firmware. This function checks
 8303 * iocb is submitted to the firmware. This function checks the
 8304 * txq so that any iocbs pending there are flushed to the
 8305 * firmware before new iocbs are submitted.
 8306 * If there are iocbs in the txq which need to be submitted
 8307 * to the firmware, lpfc_sli_next_iocb returns the first element
 8308 * of the txq after dequeuing it from the txq.
 8309 * If there is no iocb in the txq then the function returns
 8310 * *piocb and sets *piocb to NULL. The caller needs to check
 8311 * *piocb to find out if there are more commands in the txq.
dea31012005-04-17 16:05:31 -05008312static struct lpfc_iocbq *
8313lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05008314 struct lpfc_iocbq **piocb)
dea31012005-04-17 16:05:31 -05008315{
8316 struct lpfc_iocbq * nextiocb;
8317
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01008318 lockdep_assert_held(&phba->hbalock);
8319
dea31012005-04-17 16:05:31 -05008320 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8321 if (!nextiocb) {
8322 nextiocb = *piocb;
8323 *piocb = NULL;
8324 }
8325
8326 return nextiocb;
8327}
8328
James Smarte59058c2008-08-24 21:49:00 -04008329/**
James Smart3772a992009-05-22 14:50:54 -04008330 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
James Smarte59058c2008-08-24 21:49:00 -04008331 * @phba: Pointer to HBA context object.
James Smart3772a992009-05-22 14:50:54 -04008332 * @ring_number: SLI ring number to issue iocb on.
James Smarte59058c2008-08-24 21:49:00 -04008333 * @piocb: Pointer to command iocb.
8334 * @flag: Flag indicating if this command can be put into txq.
8335 *
James Smart3772a992009-05-22 14:50:54 -04008336 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8337 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8338 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8339 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8340 * this function allows only iocbs for posting buffers. This function finds
8341 * next available slot in the command ring and posts the command to the
8342 * available slot and writes the port attention register to request HBA start
8343 * processing new iocb. If there is no slot available in the ring and
8344 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8345 * the function returns IOCB_BUSY.
James Smarte59058c2008-08-24 21:49:00 -04008346 *
James Smart3772a992009-05-22 14:50:54 -04008347 * This function is called with hbalock held. The function will return success
 8348 * after it successfully submits the iocb to the firmware or after adding it to the
8349 * txq.
James Smarte59058c2008-08-24 21:49:00 -04008350 **/
James Smart98c9ea52007-10-27 13:37:33 -04008351static int
James Smart3772a992009-05-22 14:50:54 -04008352__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea31012005-04-17 16:05:31 -05008353 struct lpfc_iocbq *piocb, uint32_t flag)
8354{
8355 struct lpfc_iocbq *nextiocb;
8356 IOCB_t *iocb;
James Smart895427b2017-02-12 13:52:30 -08008357 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea31012005-04-17 16:05:31 -05008358
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01008359 lockdep_assert_held(&phba->hbalock);
8360
James Smart92d7f7b2007-06-17 19:56:38 -05008361 if (piocb->iocb_cmpl && (!piocb->vport) &&
8362 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8363 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8364 lpfc_printf_log(phba, KERN_ERR,
8365 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04008366 "1807 IOCB x%x failed. No vport\n",
James Smart92d7f7b2007-06-17 19:56:38 -05008367 piocb->iocb.ulpCommand);
8368 dump_stack();
8369 return IOCB_ERROR;
8370 }
8371
8372
Linas Vepstas8d63f372007-02-14 14:28:36 -06008373 /* If the PCI channel is in offline state, do not post iocbs. */
8374 if (unlikely(pci_channel_offline(phba->pcidev)))
8375 return IOCB_ERROR;
8376
James Smarta257bf92009-04-06 18:48:10 -04008377 /* If HBA has a deferred error attention, fail the iocb. */
8378 if (unlikely(phba->hba_flag & DEFER_ERATT))
8379 return IOCB_ERROR;
8380
dea31012005-04-17 16:05:31 -05008381 /*
8382 * We should never get an IOCB if we are in a < LINK_DOWN state
8383 */
James Smart2e0fef82007-06-17 19:56:36 -05008384 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea31012005-04-17 16:05:31 -05008385 return IOCB_ERROR;
8386
8387 /*
 8388	 * Check to see if we are blocking IOCB processing because of an
James Smart0b727fe2007-10-27 13:37:25 -04008389 * outstanding event.
dea31012005-04-17 16:05:31 -05008390 */
James Smart0b727fe2007-10-27 13:37:25 -04008391 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea31012005-04-17 16:05:31 -05008392 goto iocb_busy;
8393
James Smart2e0fef82007-06-17 19:56:36 -05008394 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea31012005-04-17 16:05:31 -05008395 /*
James Smart2680eea2007-04-25 09:52:55 -04008396 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea31012005-04-17 16:05:31 -05008397 * can be issued if the link is not up.
8398 */
8399 switch (piocb->iocb.ulpCommand) {
James Smart84774a42008-08-24 21:50:06 -04008400 case CMD_GEN_REQUEST64_CR:
8401 case CMD_GEN_REQUEST64_CX:
8402 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8403 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
James Smart6a9c52c2009-10-02 15:16:51 -04008404 FC_RCTL_DD_UNSOL_CMD) ||
James Smart84774a42008-08-24 21:50:06 -04008405 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8406 MENLO_TRANSPORT_TYPE))
8407
8408 goto iocb_busy;
8409 break;
dea31012005-04-17 16:05:31 -05008410 case CMD_QUE_RING_BUF_CN:
8411 case CMD_QUE_RING_BUF64_CN:
dea31012005-04-17 16:05:31 -05008412 /*
8413 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8414 * completion, iocb_cmpl MUST be 0.
8415 */
8416 if (piocb->iocb_cmpl)
8417 piocb->iocb_cmpl = NULL;
8418 /*FALLTHROUGH*/
8419 case CMD_CREATE_XRI_CR:
James Smart2680eea2007-04-25 09:52:55 -04008420 case CMD_CLOSE_XRI_CN:
8421 case CMD_CLOSE_XRI_CX:
dea31012005-04-17 16:05:31 -05008422 break;
8423 default:
8424 goto iocb_busy;
8425 }
8426
8427 /*
8428 * For FCP commands, we must be in a state where we can process link
8429 * attention events.
8430 */
James Smart895427b2017-02-12 13:52:30 -08008431 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
James Smart92d7f7b2007-06-17 19:56:38 -05008432 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea31012005-04-17 16:05:31 -05008433 goto iocb_busy;
James Smart92d7f7b2007-06-17 19:56:38 -05008434 }
dea31012005-04-17 16:05:31 -05008435
dea31012005-04-17 16:05:31 -05008436 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8437 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8438 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8439
8440 if (iocb)
8441 lpfc_sli_update_ring(phba, pring);
8442 else
8443 lpfc_sli_update_full_ring(phba, pring);
8444
8445 if (!piocb)
8446 return IOCB_SUCCESS;
8447
8448 goto out_busy;
8449
8450 iocb_busy:
8451 pring->stats.iocb_cmd_delay++;
8452
8453 out_busy:
8454
8455 if (!(flag & SLI_IOCB_RET_IOCB)) {
James Smart92d7f7b2007-06-17 19:56:38 -05008456 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea31012005-04-17 16:05:31 -05008457 return IOCB_SUCCESS;
8458 }
8459
8460 return IOCB_BUSY;
8461}
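
/*
 * Illustrative sketch, not part of the driver: what the SLI_IOCB_RET_IOCB flag
 * means to a caller of the routine above.  Without the flag a full ring
 * silently parks the iocb on the txq; with it the caller gets IOCB_BUSY back
 * and still owns the iocb, and may requeue it itself.  The function name is
 * hypothetical and hbalock must already be held.
 */
static int __maybe_unused
lpfc_example_try_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
			    struct lpfc_iocbq *piocb)
{
	int rc;

	lockdep_assert_held(&phba->hbalock);

	/* Ask for the iocb back instead of letting it queue on the txq */
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb,
				      SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY) {
		/* Ring is full: the caller keeps piocb and requeues it here */
		__lpfc_sli_ringtx_put(phba, &phba->sli.sli3_ring[ring_number],
				      piocb);
		rc = IOCB_SUCCESS;
	}
	return rc;
}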
8462
James Smart3772a992009-05-22 14:50:54 -04008463/**
James Smart4f774512009-05-22 14:52:35 -04008464 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8465 * @phba: Pointer to HBA context object.
8466 * @piocb: Pointer to command iocb.
8467 * @sglq: Pointer to the scatter gather queue object.
8468 *
8469 * This routine converts the bpl or bde that is in the IOCB
8470 * to a sgl list for the sli4 hardware. The physical address
8471 * of the bpl/bde is converted back to a virtual address.
8472 * If the IOCB contains a BPL then the list of BDE's is
8473 * converted to sli4_sge's. If the IOCB contains a single
8474 * BDE then it is converted to a single sli_sge.
 8475 * The IOCB is still in cpu endianness so the contents of
8476 * the bpl can be used without byte swapping.
8477 *
8478 * Returns valid XRI = Success, NO_XRI = Failure.
8479**/
8480static uint16_t
8481lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8482 struct lpfc_sglq *sglq)
8483{
8484 uint16_t xritag = NO_XRI;
8485 struct ulp_bde64 *bpl = NULL;
8486 struct ulp_bde64 bde;
8487 struct sli4_sge *sgl = NULL;
James Smart1b511972011-12-13 13:23:09 -05008488 struct lpfc_dmabuf *dmabuf;
James Smart4f774512009-05-22 14:52:35 -04008489 IOCB_t *icmd;
8490 int numBdes = 0;
8491 int i = 0;
James Smart63e801c2010-11-20 23:14:19 -05008492 uint32_t offset = 0; /* accumulated offset in the sg request list */
8493 int inbound = 0; /* number of sg reply entries inbound from firmware */
James Smart4f774512009-05-22 14:52:35 -04008494
8495 if (!piocbq || !sglq)
8496 return xritag;
8497
8498 sgl = (struct sli4_sge *)sglq->sgl;
8499 icmd = &piocbq->iocb;
James Smart6b5151f2012-01-18 16:24:06 -05008500 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8501 return sglq->sli4_xritag;
James Smart4f774512009-05-22 14:52:35 -04008502 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8503 numBdes = icmd->un.genreq64.bdl.bdeSize /
8504 sizeof(struct ulp_bde64);
8505 /* The addrHigh and addrLow fields within the IOCB
8506 * have not been byteswapped yet so there is no
8507 * need to swap them back.
8508 */
James Smart1b511972011-12-13 13:23:09 -05008509 if (piocbq->context3)
8510 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8511 else
8512 return xritag;
James Smart4f774512009-05-22 14:52:35 -04008513
James Smart1b511972011-12-13 13:23:09 -05008514 bpl = (struct ulp_bde64 *)dmabuf->virt;
James Smart4f774512009-05-22 14:52:35 -04008515 if (!bpl)
8516 return xritag;
8517
8518 for (i = 0; i < numBdes; i++) {
8519 /* Should already be byte swapped. */
James Smart28baac72010-02-12 14:42:03 -05008520 sgl->addr_hi = bpl->addrHigh;
8521 sgl->addr_lo = bpl->addrLow;
8522
James Smart05580562011-05-24 11:40:48 -04008523 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04008524 if ((i+1) == numBdes)
8525 bf_set(lpfc_sli4_sge_last, sgl, 1);
8526 else
8527 bf_set(lpfc_sli4_sge_last, sgl, 0);
James Smart28baac72010-02-12 14:42:03 -05008528 /* swap the size field back to the cpu so we
8529 * can assign it to the sgl.
8530 */
8531 bde.tus.w = le32_to_cpu(bpl->tus.w);
8532 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
James Smart63e801c2010-11-20 23:14:19 -05008533 /* The offsets in the sgl need to be accumulated
8534 * separately for the request and reply lists.
8535 * The request is always first, the reply follows.
8536 */
8537 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8538 /* add up the reply sg entries */
8539 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8540 inbound++;
8541 /* first inbound? reset the offset */
8542 if (inbound == 1)
8543 offset = 0;
8544 bf_set(lpfc_sli4_sge_offset, sgl, offset);
James Smartf9bb2da2011-10-10 21:34:11 -04008545 bf_set(lpfc_sli4_sge_type, sgl,
8546 LPFC_SGE_TYPE_DATA);
James Smart63e801c2010-11-20 23:14:19 -05008547 offset += bde.tus.f.bdeSize;
8548 }
James Smart546fc852011-03-11 16:06:29 -05008549 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04008550 bpl++;
8551 sgl++;
8552 }
8553 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8554 /* The addrHigh and addrLow fields of the BDE have not
8555 * been byteswapped yet so they need to be swapped
8556 * before putting them in the sgl.
8557 */
8558 sgl->addr_hi =
8559 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8560 sgl->addr_lo =
8561 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
James Smart05580562011-05-24 11:40:48 -04008562 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04008563 bf_set(lpfc_sli4_sge_last, sgl, 1);
8564 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart28baac72010-02-12 14:42:03 -05008565 sgl->sge_len =
8566 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
James Smart4f774512009-05-22 14:52:35 -04008567 }
8568 return sglq->sli4_xritag;
8569}
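
/*
 * Illustrative sketch, not part of the driver: a caller translating an iocb's
 * BPL/BDE into the sgl of an assigned sglq and treating NO_XRI as failure,
 * which is how the SLI4 submit path consumes the routine above.  The function
 * name is hypothetical.
 */
static int __maybe_unused
lpfc_example_map_bpl_to_sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
			    struct lpfc_sglq *sglq)
{
	/* A valid XRI confirms the bpl/bde was converted into sli4_sge's */
	if (lpfc_sli4_bpl2sgl(phba, piocbq, sglq) == NO_XRI)
		return IOCB_ERROR;
	return 0;
}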
8570
8571/**
James Smart4f774512009-05-22 14:52:35 -04008572 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
8573 * @phba: Pointer to HBA context object.
8574 * @piocb: Pointer to command iocb.
8575 * @wqe: Pointer to the work queue entry.
8576 *
8577 * This routine converts the iocb command to its Work Queue Entry
8578 * equivalent. The wqe pointer should not have any fields set when
8579 * this routine is called because it will memcpy over them.
8580 * This routine does not set the CQ_ID or the WQEC bits in the
8581 * wqe.
8582 *
8583 * Returns: 0 = Success, IOCB_ERROR = Failure.
8584 **/
8585static int
8586lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8587 union lpfc_wqe *wqe)
8588{
James Smart5ffc2662009-11-18 15:39:44 -05008589 uint32_t xmit_len = 0, total_len = 0;
James Smart4f774512009-05-22 14:52:35 -04008590 uint8_t ct = 0;
8591 uint32_t fip;
8592 uint32_t abort_tag;
8593 uint8_t command_type = ELS_COMMAND_NON_FIP;
8594 uint8_t cmnd;
8595 uint16_t xritag;
James Smartdcf2a4e2010-09-29 11:18:53 -04008596 uint16_t abrt_iotag;
8597 struct lpfc_iocbq *abrtiocbq;
James Smart4f774512009-05-22 14:52:35 -04008598 struct ulp_bde64 *bpl = NULL;
James Smartf0d9bcc2010-10-22 11:07:09 -04008599 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
James Smart5ffc2662009-11-18 15:39:44 -05008600 int numBdes, i;
8601 struct ulp_bde64 bde;
James Smartc31098c2011-04-16 11:03:33 -04008602 struct lpfc_nodelist *ndlp;
James Smartff78d8f2011-12-13 13:21:35 -05008603 uint32_t *pcmd;
James Smart1b511972011-12-13 13:23:09 -05008604 uint32_t if_type;
James Smart4f774512009-05-22 14:52:35 -04008605
James Smart45ed1192009-10-02 15:17:02 -04008606 fip = phba->hba_flag & HBA_FIP_SUPPORT;
James Smart4f774512009-05-22 14:52:35 -04008607 /* The fcp commands will set command type */
James Smart0c287582009-06-10 17:22:56 -04008608 if (iocbq->iocb_flag & LPFC_IO_FCP)
James Smart4f774512009-05-22 14:52:35 -04008609 command_type = FCP_COMMAND;
James Smartc8685952009-11-18 15:39:16 -05008610 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
James Smart0c287582009-06-10 17:22:56 -04008611 command_type = ELS_COMMAND_FIP;
8612 else
8613 command_type = ELS_COMMAND_NON_FIP;
8614
James Smartb5c53952016-03-31 14:12:30 -07008615 if (phba->fcp_embed_io)
8616 memset(wqe, 0, sizeof(union lpfc_wqe128));
James Smart4f774512009-05-22 14:52:35 -04008617 /* Some of the fields are in the right position already */
8618 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
James Smartf0d9bcc2010-10-22 11:07:09 -04008619 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
James Smart28d7f3d2014-05-21 08:05:28 -04008620 wqe->generic.wqe_com.word10 = 0;
James Smartb5c53952016-03-31 14:12:30 -07008621
8622 abort_tag = (uint32_t) iocbq->iotag;
8623 xritag = iocbq->sli4_xritag;
James Smart4f774512009-05-22 14:52:35 -04008624 /* words0-2 bpl convert bde */
8625 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
James Smart5ffc2662009-11-18 15:39:44 -05008626 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8627 sizeof(struct ulp_bde64);
James Smart4f774512009-05-22 14:52:35 -04008628 bpl = (struct ulp_bde64 *)
8629 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8630 if (!bpl)
8631 return IOCB_ERROR;
8632
8633 /* Should already be byte swapped. */
8634 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8635 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8636 /* swap the size field back to the cpu so we
8637 * can assign it to the sgl.
8638 */
8639 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
James Smart5ffc2662009-11-18 15:39:44 -05008640 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8641 total_len = 0;
8642 for (i = 0; i < numBdes; i++) {
8643 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8644 total_len += bde.tus.f.bdeSize;
8645 }
James Smart4f774512009-05-22 14:52:35 -04008646 } else
James Smart5ffc2662009-11-18 15:39:44 -05008647 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
James Smart4f774512009-05-22 14:52:35 -04008648
8649 iocbq->iocb.ulpIoTag = iocbq->iotag;
8650 cmnd = iocbq->iocb.ulpCommand;
8651
8652 switch (iocbq->iocb.ulpCommand) {
8653 case CMD_ELS_REQUEST64_CR:
James Smart93d1379e2012-05-09 21:19:34 -04008654 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8655 ndlp = iocbq->context_un.ndlp;
8656 else
8657 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04008658 if (!iocbq->iocb.ulpLe) {
8659 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8660 "2007 Only Limited Edition cmd Format"
8661 " supported 0x%x\n",
8662 iocbq->iocb.ulpCommand);
8663 return IOCB_ERROR;
8664 }
James Smartff78d8f2011-12-13 13:21:35 -05008665
James Smart5ffc2662009-11-18 15:39:44 -05008666 wqe->els_req.payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04008667		/* Els_request64 has a TMO */
8668 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8669 iocbq->iocb.ulpTimeout);
8670 /* Need a VF for word 4 set the vf bit*/
8671 bf_set(els_req64_vf, &wqe->els_req, 0);
8672 /* And a VFID for word 12 */
8673 bf_set(els_req64_vfid, &wqe->els_req, 0);
James Smart4f774512009-05-22 14:52:35 -04008674 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
James Smartf0d9bcc2010-10-22 11:07:09 -04008675 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8676 iocbq->iocb.ulpContext);
8677 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8678 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
James Smart4f774512009-05-22 14:52:35 -04008679 /* CCP CCPE PV PRI in word10 were set in the memcpy */
James Smartff78d8f2011-12-13 13:21:35 -05008680 if (command_type == ELS_COMMAND_FIP)
James Smartc8685952009-11-18 15:39:16 -05008681 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8682 >> LPFC_FIP_ELS_ID_SHIFT);
James Smartff78d8f2011-12-13 13:21:35 -05008683 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8684 iocbq->context2)->virt);
James Smart1b511972011-12-13 13:23:09 -05008685 if_type = bf_get(lpfc_sli_intf_if_type,
8686 &phba->sli4_hba.sli_intf);
8687 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
James Smartff78d8f2011-12-13 13:21:35 -05008688 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
James Smartcb69f7d2011-12-13 13:21:57 -05008689 *pcmd == ELS_CMD_SCR ||
James Smart6b5151f2012-01-18 16:24:06 -05008690 *pcmd == ELS_CMD_FDISC ||
James Smartbdcd2b92012-03-01 22:33:52 -05008691 *pcmd == ELS_CMD_LOGO ||
James Smartff78d8f2011-12-13 13:21:35 -05008692 *pcmd == ELS_CMD_PLOGI)) {
8693 bf_set(els_req64_sp, &wqe->els_req, 1);
8694 bf_set(els_req64_sid, &wqe->els_req,
8695 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -04008696 if ((*pcmd == ELS_CMD_FLOGI) &&
8697 !(phba->fc_topology ==
8698 LPFC_TOPOLOGY_LOOP))
8699 bf_set(els_req64_sid, &wqe->els_req, 0);
James Smartff78d8f2011-12-13 13:21:35 -05008700 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8701 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
James Smarta7dd9c02012-05-09 21:16:50 -04008702 phba->vpi_ids[iocbq->vport->vpi]);
James Smart3ef6d242012-01-18 16:23:48 -05008703 } else if (pcmd && iocbq->context1) {
James Smartff78d8f2011-12-13 13:21:35 -05008704 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8705 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8706 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8707 }
James Smartc8685952009-11-18 15:39:16 -05008708 }
James Smart6d368e52011-05-24 11:44:12 -04008709 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8710 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04008711 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8712 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8713 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8714 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8715 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8716 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -04008717 wqe->els_req.max_response_payload_len = total_len - xmit_len;
James Smart7851fe22011-07-22 18:36:52 -04008718 break;
James Smart5ffc2662009-11-18 15:39:44 -05008719 case CMD_XMIT_SEQUENCE64_CX:
James Smartf0d9bcc2010-10-22 11:07:09 -04008720 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8721 iocbq->iocb.un.ulpWord[3]);
8722 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04008723 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart5ffc2662009-11-18 15:39:44 -05008724 /* The entire sequence is transmitted for this IOCB */
8725 xmit_len = total_len;
8726 cmnd = CMD_XMIT_SEQUENCE64_CR;
James Smart1b511972011-12-13 13:23:09 -05008727 if (phba->link_flag & LS_LOOPBACK_MODE)
8728 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
James Smart4f774512009-05-22 14:52:35 -04008729 case CMD_XMIT_SEQUENCE64_CR:
James Smartf0d9bcc2010-10-22 11:07:09 -04008730 /* word3 iocb=io_tag32 wqe=reserved */
8731 wqe->xmit_sequence.rsvd3 = 0;
James Smart4f774512009-05-22 14:52:35 -04008732 /* word4 relative_offset memcpy */
8733 /* word5 r_ctl/df_ctl memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04008734 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8735 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8736 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8737 LPFC_WQE_IOD_WRITE);
8738 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8739 LPFC_WQE_LENLOC_WORD12);
8740 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
James Smart5ffc2662009-11-18 15:39:44 -05008741 wqe->xmit_sequence.xmit_len = xmit_len;
8742 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04008743 break;
James Smart4f774512009-05-22 14:52:35 -04008744 case CMD_XMIT_BCAST64_CN:
James Smartf0d9bcc2010-10-22 11:07:09 -04008745 /* word3 iocb=iotag32 wqe=seq_payload_len */
8746 wqe->xmit_bcast64.seq_payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04008747 /* word4 iocb=rsvd wqe=rsvd */
8748 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8749 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
James Smartf0d9bcc2010-10-22 11:07:09 -04008750 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04008751 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
James Smartf0d9bcc2010-10-22 11:07:09 -04008752 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8753 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8754 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8755 LPFC_WQE_LENLOC_WORD3);
8756 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
James Smart7851fe22011-07-22 18:36:52 -04008757 break;
James Smart4f774512009-05-22 14:52:35 -04008758 case CMD_FCP_IWRITE64_CR:
8759 command_type = FCP_COMMAND_DATA_OUT;
James Smartf0d9bcc2010-10-22 11:07:09 -04008760 /* word3 iocb=iotag wqe=payload_offset_len */
8761 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04008762 bf_set(payload_offset_len, &wqe->fcp_iwrite,
8763 xmit_len + sizeof(struct fcp_rsp));
8764 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8765 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04008766 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8767 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8768 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8769 iocbq->iocb.ulpFCP2Rcvy);
8770 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8771 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04008772 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8773 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8774 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04008775 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05008776 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05008777 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8778 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07008779 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8780 if (iocbq->priority) {
8781 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8782 (iocbq->priority << 1));
8783 } else {
James Smart1ba981f2014-02-20 09:56:45 -05008784 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8785 (phba->cfg_XLanePriority << 1));
8786 }
8787 }
James Smartb5c53952016-03-31 14:12:30 -07008788 /* Note, word 10 is already initialized to 0 */
8789
8790 if (phba->fcp_embed_io) {
8791 struct lpfc_scsi_buf *lpfc_cmd;
8792 struct sli4_sge *sgl;
8793 union lpfc_wqe128 *wqe128;
8794 struct fcp_cmnd *fcp_cmnd;
8795 uint32_t *ptr;
8796
8797 /* 128 byte wqe support here */
8798 wqe128 = (union lpfc_wqe128 *)wqe;
8799
8800 lpfc_cmd = iocbq->context1;
8801 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8802 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8803
8804 /* Word 0-2 - FCP_CMND */
8805 wqe128->generic.bde.tus.f.bdeFlags =
8806 BUFF_TYPE_BDE_IMMED;
8807 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8808 wqe128->generic.bde.addrHigh = 0;
8809 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8810
8811 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8812
8813 /* Word 22-29 FCP CMND Payload */
8814 ptr = &wqe128->words[22];
8815 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8816 }
James Smart7851fe22011-07-22 18:36:52 -04008817 break;
James Smartf0d9bcc2010-10-22 11:07:09 -04008818 case CMD_FCP_IREAD64_CR:
8819 /* word3 iocb=iotag wqe=payload_offset_len */
8820 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04008821 bf_set(payload_offset_len, &wqe->fcp_iread,
8822 xmit_len + sizeof(struct fcp_rsp));
8823 bf_set(cmd_buff_len, &wqe->fcp_iread,
8824 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04008825 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8826 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8827 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8828 iocbq->iocb.ulpFCP2Rcvy);
8829 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
James Smart4f774512009-05-22 14:52:35 -04008830 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04008831 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8832 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8833 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04008834 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05008835 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05008836 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8837 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07008838 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8839 if (iocbq->priority) {
8840 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8841 (iocbq->priority << 1));
8842 } else {
James Smart1ba981f2014-02-20 09:56:45 -05008843 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8844 (phba->cfg_XLanePriority << 1));
8845 }
8846 }
James Smartb5c53952016-03-31 14:12:30 -07008847 /* Note, word 10 is already initialized to 0 */
8848
8849 if (phba->fcp_embed_io) {
8850 struct lpfc_scsi_buf *lpfc_cmd;
8851 struct sli4_sge *sgl;
8852 union lpfc_wqe128 *wqe128;
8853 struct fcp_cmnd *fcp_cmnd;
8854 uint32_t *ptr;
8855
8856 /* 128 byte wqe support here */
8857 wqe128 = (union lpfc_wqe128 *)wqe;
8858
8859 lpfc_cmd = iocbq->context1;
8860 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8861 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8862
8863 /* Word 0-2 - FCP_CMND */
8864 wqe128->generic.bde.tus.f.bdeFlags =
8865 BUFF_TYPE_BDE_IMMED;
8866 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8867 wqe128->generic.bde.addrHigh = 0;
8868 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8869
8870 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8871
8872 /* Word 22-29 FCP CMND Payload */
8873 ptr = &wqe128->words[22];
8874 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8875 }
James Smart7851fe22011-07-22 18:36:52 -04008876 break;
James Smartf1126682009-06-10 17:22:44 -04008877 case CMD_FCP_ICMND64_CR:
James Smart0ba4b212013-10-10 12:22:38 -04008878 /* word3 iocb=iotag wqe=payload_offset_len */
8879 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8880 bf_set(payload_offset_len, &wqe->fcp_icmd,
8881 xmit_len + sizeof(struct fcp_rsp));
8882 bf_set(cmd_buff_len, &wqe->fcp_icmd,
8883 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04008884 /* word3 iocb=IO_TAG wqe=reserved */
James Smartf0d9bcc2010-10-22 11:07:09 -04008885 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
James Smartf1126682009-06-10 17:22:44 -04008886 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04008887 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8888 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8889 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8890 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8891 LPFC_WQE_LENLOC_NONE);
James Smart2a94aea2012-09-29 11:30:31 -04008892 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8893 iocbq->iocb.ulpFCP2Rcvy);
James Smart1ba981f2014-02-20 09:56:45 -05008894 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8895 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07008896 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8897 if (iocbq->priority) {
8898 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8899 (iocbq->priority << 1));
8900 } else {
James Smart1ba981f2014-02-20 09:56:45 -05008901 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8902 (phba->cfg_XLanePriority << 1));
8903 }
8904 }
James Smartb5c53952016-03-31 14:12:30 -07008905 /* Note, word 10 is already initialized to 0 */
8906
8907 if (phba->fcp_embed_io) {
8908 struct lpfc_scsi_buf *lpfc_cmd;
8909 struct sli4_sge *sgl;
8910 union lpfc_wqe128 *wqe128;
8911 struct fcp_cmnd *fcp_cmnd;
8912 uint32_t *ptr;
8913
8914 /* 128 byte wqe support here */
8915 wqe128 = (union lpfc_wqe128 *)wqe;
8916
8917 lpfc_cmd = iocbq->context1;
8918 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8919 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8920
8921 /* Word 0-2 - FCP_CMND */
8922 wqe128->generic.bde.tus.f.bdeFlags =
8923 BUFF_TYPE_BDE_IMMED;
8924 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8925 wqe128->generic.bde.addrHigh = 0;
8926 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8927
8928 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
8929
8930 /* Word 22-29 FCP CMND Payload */
8931 ptr = &wqe128->words[22];
8932 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8933 }
James Smart7851fe22011-07-22 18:36:52 -04008934 break;
James Smart4f774512009-05-22 14:52:35 -04008935 case CMD_GEN_REQUEST64_CR:
James Smart63e801c2010-11-20 23:14:19 -05008936 /* For this command calculate the xmit length of the
8937 * request bde.
8938 */
8939 xmit_len = 0;
8940 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8941 sizeof(struct ulp_bde64);
8942 for (i = 0; i < numBdes; i++) {
James Smart63e801c2010-11-20 23:14:19 -05008943 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
James Smart546fc852011-03-11 16:06:29 -05008944 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8945 break;
James Smart63e801c2010-11-20 23:14:19 -05008946 xmit_len += bde.tus.f.bdeSize;
8947 }
James Smartf0d9bcc2010-10-22 11:07:09 -04008948 /* word3 iocb=IO_TAG wqe=request_payload_len */
8949 wqe->gen_req.request_payload_len = xmit_len;
8950 /* word4 iocb=parameter wqe=relative_offset memcpy */
8951 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
James Smart4f774512009-05-22 14:52:35 -04008952 /* word6 context tag copied in memcpy */
8953 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8954 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8956 "2015 Invalid CT %x command 0x%x\n",
8957 ct, iocbq->iocb.ulpCommand);
8958 return IOCB_ERROR;
8959 }
James Smartf0d9bcc2010-10-22 11:07:09 -04008960 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8961 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8962 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8963 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8964 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8965 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8966 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8967 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -04008968 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
James Smart4f774512009-05-22 14:52:35 -04008969 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04008970 break;
James Smart4f774512009-05-22 14:52:35 -04008971 case CMD_XMIT_ELS_RSP64_CX:
James Smartc31098c2011-04-16 11:03:33 -04008972 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04008973 /* words0-2 BDE memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04008974 /* word3 iocb=iotag32 wqe=response_payload_len */
8975 wqe->xmit_els_rsp.response_payload_len = xmit_len;
James Smart939723a2012-05-09 21:19:03 -04008976 /* word4 */
8977 wqe->xmit_els_rsp.word4 = 0;
James Smart4f774512009-05-22 14:52:35 -04008978 /* word5 iocb=rsvd wge=did */
8979 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
James Smart939723a2012-05-09 21:19:03 -04008980 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8981
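		/*
		 * In point-to-point mode no fabric assigned the addresses, so
		 * the driver supplies the source ID itself (and clears the
		 * destination ID while the local DID is still the well-known
		 * Fabric_DID), as done below for IF_TYPE_2 ports.
		 */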
8982 if_type = bf_get(lpfc_sli_intf_if_type,
8983 &phba->sli4_hba.sli_intf);
8984 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8985 if (iocbq->vport->fc_flag & FC_PT2PT) {
8986 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8987 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8988 iocbq->vport->fc_myDID);
8989 if (iocbq->vport->fc_myDID == Fabric_DID) {
8990 bf_set(wqe_els_did,
8991 &wqe->xmit_els_rsp.wqe_dest, 0);
8992 }
8993 }
8994 }
James Smartf0d9bcc2010-10-22 11:07:09 -04008995 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8996 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8997 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8998 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04008999 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart4f774512009-05-22 14:52:35 -04009000 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
James Smartf0d9bcc2010-10-22 11:07:09 -04009001 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smart6d368e52011-05-24 11:44:12 -04009002 phba->vpi_ids[iocbq->vport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04009003 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9004 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9005 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9006 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9007 LPFC_WQE_LENLOC_WORD3);
9008 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
James Smart6d368e52011-05-24 11:44:12 -04009009 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9010 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartff78d8f2011-12-13 13:21:35 -05009011 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9012 iocbq->context2)->virt);
9013 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
James Smart939723a2012-05-09 21:19:03 -04009014 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9015 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
James Smartff78d8f2011-12-13 13:21:35 -05009016 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -04009017 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9018 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smartff78d8f2011-12-13 13:21:35 -05009019 phba->vpi_ids[phba->pport->vpi]);
9020 }
James Smart4f774512009-05-22 14:52:35 -04009021 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04009022 break;
James Smart4f774512009-05-22 14:52:35 -04009023 case CMD_CLOSE_XRI_CN:
9024 case CMD_ABORT_XRI_CN:
9025 case CMD_ABORT_XRI_CX:
 9026 /* words 0-2: reserved, should be 0 from the memcpy */
9027 /* port will send abts */
James Smartdcf2a4e2010-09-29 11:18:53 -04009028 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9029 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9030 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9031 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9032 } else
9033 fip = 0;
9034
9035 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
James Smart4f774512009-05-22 14:52:35 -04009036 /*
James Smartdcf2a4e2010-09-29 11:18:53 -04009037 * The link is down, or the command was ELS_FIP
9038 * so the fw does not need to send abts
James Smart4f774512009-05-22 14:52:35 -04009039 * on the wire.
9040 */
9041 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9042 else
9043 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9044 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
James Smartf0d9bcc2010-10-22 11:07:09 -04009045 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9046 wqe->abort_cmd.rsrvd5 = 0;
9047 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04009048 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9049 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
James Smart4f774512009-05-22 14:52:35 -04009050 /*
9051 * The abort handler will send us CMD_ABORT_XRI_CN or
9052 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9053 */
James Smartf0d9bcc2010-10-22 11:07:09 -04009054 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9055 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9056 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9057 LPFC_WQE_LENLOC_NONE);
James Smart4f774512009-05-22 14:52:35 -04009058 cmnd = CMD_ABORT_XRI_CX;
9059 command_type = OTHER_COMMAND;
9060 xritag = 0;
James Smart7851fe22011-07-22 18:36:52 -04009061 break;
James Smart6669f9b2009-10-02 15:16:45 -04009062 case CMD_XMIT_BLS_RSP64_CX:
James Smart6b5151f2012-01-18 16:24:06 -05009063 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart546fc852011-03-11 16:06:29 -05009064 /* As BLS ABTS RSP WQE is very different from other WQEs,
James Smart6669f9b2009-10-02 15:16:45 -04009065 * we re-construct this WQE here based on information in
9066 * iocbq from scratch.
9067 */
9068 memset(wqe, 0, sizeof(union lpfc_wqe));
James Smart5ffc2662009-11-18 15:39:44 -05009069 /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
James Smart6669f9b2009-10-02 15:16:45 -04009070 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05009071 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9072 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
James Smart5ffc2662009-11-18 15:39:44 -05009073 LPFC_ABTS_UNSOL_INT) {
9074 /* ABTS sent by initiator to CT exchange, the
9075 * RX_ID field will be filled with the newly
9076 * allocated responder XRI.
9077 */
9078 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9079 iocbq->sli4_xritag);
9080 } else {
9081 /* ABTS sent by responder to CT exchange, the
9082 * RX_ID field will be filled with the responder
9083 * RX_ID from ABTS.
9084 */
9085 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05009086 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
James Smart5ffc2662009-11-18 15:39:44 -05009087 }
James Smart6669f9b2009-10-02 15:16:45 -04009088 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9089 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
James Smart6b5151f2012-01-18 16:24:06 -05009090
9091 /* Use CT=VPI */
9092 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9093 ndlp->nlp_DID);
9094 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9095 iocbq->iocb.ulpContext);
9096 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
James Smart6669f9b2009-10-02 15:16:45 -04009097 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
James Smart6b5151f2012-01-18 16:24:06 -05009098 phba->vpi_ids[phba->pport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04009099 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9100 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9101 LPFC_WQE_LENLOC_NONE);
James Smart6669f9b2009-10-02 15:16:45 -04009102 /* Overwrite the pre-set command type with OTHER_COMMAND */
9103 command_type = OTHER_COMMAND;
James Smart546fc852011-03-11 16:06:29 -05009104 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9105 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9106 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9107 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9108 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9109 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9110 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9111 }
9112
James Smart7851fe22011-07-22 18:36:52 -04009113 break;
James Smart4f774512009-05-22 14:52:35 -04009114 case CMD_XRI_ABORTED_CX:
9115 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
James Smart4f774512009-05-22 14:52:35 -04009116 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9117 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9118 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9119 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9120 default:
9121 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9122 "2014 Invalid command 0x%x\n",
9123 iocbq->iocb.ulpCommand);
9124 return IOCB_ERROR;
James Smart7851fe22011-07-22 18:36:52 -04009125 break;
James Smart4f774512009-05-22 14:52:35 -04009126 }
James Smart6d368e52011-05-24 11:44:12 -04009127
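	/*
	 * Fields common to every WQE type: DIF setting, XRI, request tag,
	 * abort tag, command type, opcode, class and CQ id.
	 */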
James Smart8012cc32012-10-31 14:44:49 -04009128 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9129 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9130 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9131 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9132 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9133 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9134 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9135 LPFC_IO_DIF_INSERT);
James Smartf0d9bcc2010-10-22 11:07:09 -04009136 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9137 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9138 wqe->generic.wqe_com.abort_tag = abort_tag;
9139 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9140 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9141 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9142 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
James Smart4f774512009-05-22 14:52:35 -04009143 return 0;
9144}
9145
9146/**
9147 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9148 * @phba: Pointer to HBA context object.
9149 * @ring_number: SLI ring number to issue iocb on.
9150 * @piocb: Pointer to command iocb.
9151 * @flag: Flag indicating if this command can be put into txq.
9152 *
9153 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9154 * an iocb command to an HBA with SLI-4 interface spec.
9155 *
9156 * This function is called with hbalock held. The function will return success
 9157 * after it successfully submits the iocb to firmware or after adding it to the
9158 * txq.
9159 **/
9160static int
9161__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9162 struct lpfc_iocbq *piocb, uint32_t flag)
9163{
9164 struct lpfc_sglq *sglq;
James Smartb5c53952016-03-31 14:12:30 -07009165 union lpfc_wqe *wqe;
9166 union lpfc_wqe128 wqe128;
James Smart1ba981f2014-02-20 09:56:45 -05009167 struct lpfc_queue *wq;
James Smart895427b2017-02-12 13:52:30 -08009168 struct lpfc_sli_ring *pring;
James Smart4f774512009-05-22 14:52:35 -04009169
James Smart895427b2017-02-12 13:52:30 -08009170 /* Get the WQ */
9171 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9172 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9173 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9174 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9175 else
9176 wq = phba->sli4_hba.oas_wq;
9177 } else {
9178 wq = phba->sli4_hba.els_wq;
9179 }
9180
9181 /* Get corresponding ring */
9182 pring = wq->pring;
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01009183
James Smartb5c53952016-03-31 14:12:30 -07009184 /*
9185 * The WQE can be either 64 or 128 bytes,
9186 * so allocate space on the stack assuming the largest.
9187 */
9188 wqe = (union lpfc_wqe *)&wqe128;
9189
James Smart895427b2017-02-12 13:52:30 -08009190 lockdep_assert_held(&phba->hbalock);
9191
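	/*
	 * Commands arriving without an XRI (ELS/CT) need an ELS sglq from
	 * the pool, unless they are aborts/closes, which may be posted with
	 * NO_XRI; FCP commands already carry a mapped sgl and XRI.
	 */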
James Smart4f774512009-05-22 14:52:35 -04009192 if (piocb->sli4_xritag == NO_XRI) {
9193 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
James Smart6b5151f2012-01-18 16:24:06 -05009194 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
James Smart4f774512009-05-22 14:52:35 -04009195 sglq = NULL;
9196 else {
James Smart0e9bb8d2013-03-01 16:35:12 -05009197 if (!list_empty(&pring->txq)) {
James Smart2a9bf3d2010-06-07 15:24:45 -04009198 if (!(flag & SLI_IOCB_RET_IOCB)) {
9199 __lpfc_sli_ringtx_put(phba,
9200 pring, piocb);
9201 return IOCB_SUCCESS;
9202 } else {
9203 return IOCB_BUSY;
9204 }
9205 } else {
James Smart895427b2017-02-12 13:52:30 -08009206 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
James Smart2a9bf3d2010-06-07 15:24:45 -04009207 if (!sglq) {
9208 if (!(flag & SLI_IOCB_RET_IOCB)) {
9209 __lpfc_sli_ringtx_put(phba,
9210 pring,
9211 piocb);
9212 return IOCB_SUCCESS;
9213 } else
9214 return IOCB_BUSY;
9215 }
9216 }
James Smart4f774512009-05-22 14:52:35 -04009217 }
James Smart2ea259e2017-02-12 13:52:27 -08009218 } else if (piocb->iocb_flag & LPFC_IO_FCP)
James Smart6d368e52011-05-24 11:44:12 -04009219 /* These IO's already have an XRI and a mapped sgl. */
9220 sglq = NULL;
James Smart2ea259e2017-02-12 13:52:27 -08009221 else {
James Smart6d368e52011-05-24 11:44:12 -04009222 /*
 9223 * This is a continuation of a command (CX), so this
James Smart4f774512009-05-22 14:52:35 -04009224 * sglq is on the active list
9225 */
James Smartedccdc12013-01-03 15:43:45 -05009226 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
James Smart4f774512009-05-22 14:52:35 -04009227 if (!sglq)
9228 return IOCB_ERROR;
9229 }
9230
9231 if (sglq) {
James Smart6d368e52011-05-24 11:44:12 -04009232 piocb->sli4_lxritag = sglq->sli4_lxritag;
James Smart2a9bf3d2010-06-07 15:24:45 -04009233 piocb->sli4_xritag = sglq->sli4_xritag;
James Smart2a9bf3d2010-06-07 15:24:45 -04009234 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
James Smart4f774512009-05-22 14:52:35 -04009235 return IOCB_ERROR;
9236 }
9237
James Smartb5c53952016-03-31 14:12:30 -07009238 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
James Smart4f774512009-05-22 14:52:35 -04009239 return IOCB_ERROR;
9240
James Smart895427b2017-02-12 13:52:30 -08009241 if (lpfc_sli4_wq_put(wq, wqe))
9242 return IOCB_ERROR;
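	/* WQE posted; track the iocb on the txcmplq until its CQE completes */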
James Smart4f774512009-05-22 14:52:35 -04009243 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9244
9245 return 0;
9246}
9247
9248/**
James Smart3772a992009-05-22 14:50:54 -04009249 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9250 *
9251 * This routine wraps the actual lockless version for issusing IOCB function
9252 * pointer from the lpfc_hba struct.
9253 *
9254 * Return codes:
James Smartb5c53952016-03-31 14:12:30 -07009255 * IOCB_ERROR - Error
9256 * IOCB_SUCCESS - Success
9257 * IOCB_BUSY - Busy
James Smart3772a992009-05-22 14:50:54 -04009258 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04009259int
James Smart3772a992009-05-22 14:50:54 -04009260__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9261 struct lpfc_iocbq *piocb, uint32_t flag)
9262{
9263 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9264}
9265
9266/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009267 * lpfc_sli_api_table_setup - Set up sli api function jump table
James Smart3772a992009-05-22 14:50:54 -04009268 * @phba: The hba struct for which this call is being executed.
9269 * @dev_grp: The HBA PCI-Device group number.
9270 *
9271 * This routine sets up the SLI interface API function jump table in @phba
9272 * struct.
9273 * Returns: 0 - success, -ENODEV - failure.
9274 **/
9275int
9276lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9277{
9278
9279 switch (dev_grp) {
9280 case LPFC_PCI_DEV_LP:
9281 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9282 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9283 break;
James Smart4f774512009-05-22 14:52:35 -04009284 case LPFC_PCI_DEV_OC:
9285 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9286 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9287 break;
James Smart3772a992009-05-22 14:50:54 -04009288 default:
9289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9290 "1419 Invalid HBA PCI-device group: 0x%x\n",
9291 dev_grp);
9292 return -ENODEV;
9293 break;
9294 }
9295 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9296 return 0;
9297}
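/*
 * Illustrative usage only (not taken from this file): once the PCI device
 * group is known during attach, the jump table is expected to be set up
 * with something like
 *
 *	if (lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *
 * after which phba->__lpfc_sli_issue_iocb dispatches to the SLI-3 or SLI-4
 * implementation selected above.
 */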
James Smart92d7f7b2007-06-17 19:56:38 -05009298
James Smarta1efe162015-05-21 13:55:20 -04009299/**
James Smart895427b2017-02-12 13:52:30 -08009300 * lpfc_sli4_calc_ring - Calculates which ring to use
James Smarta1efe162015-05-21 13:55:20 -04009301 * @phba: Pointer to HBA context object.
James Smarta1efe162015-05-21 13:55:20 -04009302 * @piocb: Pointer to command iocb.
9303 *
James Smart895427b2017-02-12 13:52:30 -08009304 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 9305 * hba_wqidx, thus we need to calculate the corresponding ring.
James Smarta1efe162015-05-21 13:55:20 -04009306 * Since ABORTS must go on the same WQ as the command they are
James Smart895427b2017-02-12 13:52:30 -08009307 * aborting, we use the command's hba_wqidx.
James Smarta1efe162015-05-21 13:55:20 -04009308 */
James Smart895427b2017-02-12 13:52:30 -08009309struct lpfc_sli_ring *
9310lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
James Smart9bd2bff52014-09-03 12:57:30 -04009311{
James Smart895427b2017-02-12 13:52:30 -08009312 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
James Smart8b0dff12015-05-22 10:42:38 -04009313 if (!(phba->cfg_fof) ||
James Smart895427b2017-02-12 13:52:30 -08009314 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
James Smart8b0dff12015-05-22 10:42:38 -04009315 if (unlikely(!phba->sli4_hba.fcp_wq))
James Smart895427b2017-02-12 13:52:30 -08009316 return NULL;
James Smart8b0dff12015-05-22 10:42:38 -04009317 /*
James Smart895427b2017-02-12 13:52:30 -08009318 * for abort iocb hba_wqidx should already
James Smart8b0dff12015-05-22 10:42:38 -04009319 * be setup based on what work queue we used.
9320 */
9321 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
James Smart895427b2017-02-12 13:52:30 -08009322 piocb->hba_wqidx =
James Smart8b0dff12015-05-22 10:42:38 -04009323 lpfc_sli4_scmd_to_wqidx_distr(phba,
9324 piocb->context1);
James Smart895427b2017-02-12 13:52:30 -08009325 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
James Smart8b0dff12015-05-22 10:42:38 -04009326 } else {
9327 if (unlikely(!phba->sli4_hba.oas_wq))
James Smart895427b2017-02-12 13:52:30 -08009328 return NULL;
9329 piocb->hba_wqidx = 0;
9330 return phba->sli4_hba.oas_wq->pring;
James Smart9bd2bff52014-09-03 12:57:30 -04009331 }
James Smart895427b2017-02-12 13:52:30 -08009332 } else {
9333 if (unlikely(!phba->sli4_hba.els_wq))
9334 return NULL;
9335 piocb->hba_wqidx = 0;
9336 return phba->sli4_hba.els_wq->pring;
James Smart9bd2bff52014-09-03 12:57:30 -04009337 }
James Smart9bd2bff52014-09-03 12:57:30 -04009338}
9339
James Smarte59058c2008-08-24 21:49:00 -04009340/**
James Smart3621a712009-04-06 18:47:14 -04009341 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
James Smarte59058c2008-08-24 21:49:00 -04009342 * @phba: Pointer to HBA context object.
9343 * @pring: Pointer to driver SLI ring object.
9344 * @piocb: Pointer to command iocb.
9345 * @flag: Flag indicating if this command can be put into txq.
9346 *
9347 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9348 * function. This function gets the hbalock and calls
9349 * __lpfc_sli_issue_iocb function and will return the error returned
9350 * by __lpfc_sli_issue_iocb function. This wrapper is used by
9351 * functions which do not hold hbalock.
9352 **/
James Smart92d7f7b2007-06-17 19:56:38 -05009353int
James Smart3772a992009-05-22 14:50:54 -04009354lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
James Smart92d7f7b2007-06-17 19:56:38 -05009355 struct lpfc_iocbq *piocb, uint32_t flag)
9356{
James Smart895427b2017-02-12 13:52:30 -08009357 struct lpfc_hba_eq_hdl *hba_eq_hdl;
James Smart2a76a282012-08-03 12:35:54 -04009358 struct lpfc_sli_ring *pring;
James Smartba20c852012-08-03 12:36:52 -04009359 struct lpfc_queue *fpeq;
9360 struct lpfc_eqe *eqe;
James Smart92d7f7b2007-06-17 19:56:38 -05009361 unsigned long iflags;
James Smart2a76a282012-08-03 12:35:54 -04009362 int rc, idx;
James Smart92d7f7b2007-06-17 19:56:38 -05009363
James Smart7e56aa22012-08-03 12:35:34 -04009364 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -08009365 pring = lpfc_sli4_calc_ring(phba, piocb);
9366 if (unlikely(pring == NULL))
James Smart9bd2bff52014-09-03 12:57:30 -04009367 return IOCB_ERROR;
James Smartba20c852012-08-03 12:36:52 -04009368
James Smart9bd2bff52014-09-03 12:57:30 -04009369 spin_lock_irqsave(&pring->ring_lock, iflags);
9370 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9371 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smartba20c852012-08-03 12:36:52 -04009372
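		/*
		 * FCP look-ahead: if no other context holds this EQ, poll and
		 * process its events inline so completions are reaped without
		 * waiting for the next interrupt.
		 */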
James Smart9bd2bff52014-09-03 12:57:30 -04009373 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
James Smart895427b2017-02-12 13:52:30 -08009374 idx = piocb->hba_wqidx;
9375 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
James Smartba20c852012-08-03 12:36:52 -04009376
James Smart895427b2017-02-12 13:52:30 -08009377 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
James Smartba20c852012-08-03 12:36:52 -04009378
James Smart9bd2bff52014-09-03 12:57:30 -04009379 /* Get associated EQ with this index */
9380 fpeq = phba->sli4_hba.hba_eq[idx];
James Smartba20c852012-08-03 12:36:52 -04009381
James Smart9bd2bff52014-09-03 12:57:30 -04009382 /* Turn off interrupts from this EQ */
9383 lpfc_sli4_eq_clr_intr(fpeq);
James Smartba20c852012-08-03 12:36:52 -04009384
James Smart9bd2bff52014-09-03 12:57:30 -04009385 /*
9386 * Process all the events on FCP EQ
9387 */
9388 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9389 lpfc_sli4_hba_handle_eqe(phba,
9390 eqe, idx);
9391 fpeq->EQ_processed++;
James Smartba20c852012-08-03 12:36:52 -04009392 }
James Smartba20c852012-08-03 12:36:52 -04009393
James Smart9bd2bff52014-09-03 12:57:30 -04009394 /* Always clear and re-arm the EQ */
9395 lpfc_sli4_eq_release(fpeq,
9396 LPFC_QUEUE_REARM);
9397 }
James Smart895427b2017-02-12 13:52:30 -08009398 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smart2a76a282012-08-03 12:35:54 -04009399 }
James Smart7e56aa22012-08-03 12:35:34 -04009400 } else {
9401 /* For now, SLI2/3 will still use hbalock */
9402 spin_lock_irqsave(&phba->hbalock, iflags);
9403 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9404 spin_unlock_irqrestore(&phba->hbalock, iflags);
9405 }
James Smart92d7f7b2007-06-17 19:56:38 -05009406 return rc;
9407}
9408
James Smarte59058c2008-08-24 21:49:00 -04009409/**
James Smart3621a712009-04-06 18:47:14 -04009410 * lpfc_extra_ring_setup - Extra ring setup function
James Smarte59058c2008-08-24 21:49:00 -04009411 * @phba: Pointer to HBA context object.
9412 *
 9413 * This function is called while the driver attaches to the
 9414 * HBA to set up the extra ring. The extra ring is used
9415 * only when driver needs to support target mode functionality
9416 * or IP over FC functionalities.
9417 *
James Smart895427b2017-02-12 13:52:30 -08009418 * This function is called with no lock held. SLI3 only.
James Smarte59058c2008-08-24 21:49:00 -04009419 **/
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009420static int
9421lpfc_extra_ring_setup( struct lpfc_hba *phba)
9422{
9423 struct lpfc_sli *psli;
9424 struct lpfc_sli_ring *pring;
9425
9426 psli = &phba->sli;
9427
9428 /* Adjust cmd/rsp ring iocb entries more evenly */
James Smarta4bc3372006-12-02 13:34:16 -05009429
9430 /* Take some away from the FCP ring */
James Smart895427b2017-02-12 13:52:30 -08009431 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smart7e56aa22012-08-03 12:35:34 -04009432 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9433 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9434 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9435 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009436
James Smarta4bc3372006-12-02 13:34:16 -05009437 /* and give them to the extra ring */
James Smart895427b2017-02-12 13:52:30 -08009438 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
James Smarta4bc3372006-12-02 13:34:16 -05009439
James Smart7e56aa22012-08-03 12:35:34 -04009440 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9441 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9442 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9443 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009444
9445 /* Setup default profile for this ring */
9446 pring->iotag_max = 4096;
9447 pring->num_mask = 1;
9448 pring->prt[0].profile = 0; /* Mask 0 */
James Smarta4bc3372006-12-02 13:34:16 -05009449 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9450 pring->prt[0].type = phba->cfg_multi_ring_type;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009451 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9452 return 0;
9453}
9454
James Smartcb69f7d2011-12-13 13:21:57 -05009455/**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9456 * @phba: Pointer to HBA context object.
9457 * @iocbq: Pointer to iocb object.
9458 *
9459 * The async_event handler calls this routine when it receives
9460 * an ASYNC_STATUS_CN event from the port. The port generates
9461 * this event when an Abort Sequence request to an rport fails
9462 * twice in succession. The abort could be originated by the
9463 * driver or by the port. The ABTS could have been for an ELS
9464 * or FCP IO. The port only generates this event when an ABTS
9465 * fails to complete after one retry.
9466 */
9467static void
9468lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9469 struct lpfc_iocbq *iocbq)
9470{
9471 struct lpfc_nodelist *ndlp = NULL;
9472 uint16_t rpi = 0, vpi = 0;
9473 struct lpfc_vport *vport = NULL;
9474
9475 /* The rpi in the ulpContext is vport-sensitive. */
9476 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9477 rpi = iocbq->iocb.ulpContext;
9478
9479 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9480 "3092 Port generated ABTS async event "
9481 "on vpi %d rpi %d status 0x%x\n",
9482 vpi, rpi, iocbq->iocb.ulpStatus);
9483
9484 vport = lpfc_find_vport_by_vpid(phba, vpi);
9485 if (!vport)
9486 goto err_exit;
9487 ndlp = lpfc_findnode_rpi(vport, rpi);
9488 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9489 goto err_exit;
9490
9491 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9492 lpfc_sli_abts_recover_port(vport, ndlp);
9493 return;
9494
9495 err_exit:
9496 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9497 "3095 Event Context not found, no "
9498 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9499 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9500 vpi, rpi);
9501}
9502
9503/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9504 * @phba: pointer to HBA context object.
9505 * @ndlp: nodelist pointer for the impacted rport.
9506 * @axri: pointer to the wcqe containing the failed exchange.
9507 *
9508 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9509 * port. The port generates this event when an abort exchange request to an
9510 * rport fails twice in succession with no reply. The abort could be originated
9511 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
9512 */
9513void
9514lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9515 struct lpfc_nodelist *ndlp,
9516 struct sli4_wcqe_xri_aborted *axri)
9517{
9518 struct lpfc_vport *vport;
James Smart5c1db2a2012-03-01 22:34:36 -05009519 uint32_t ext_status = 0;
James Smartcb69f7d2011-12-13 13:21:57 -05009520
James Smart6b5151f2012-01-18 16:24:06 -05009521 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
James Smartcb69f7d2011-12-13 13:21:57 -05009522 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9523 "3115 Node Context not found, driver "
9524 "ignoring abts err event\n");
James Smart6b5151f2012-01-18 16:24:06 -05009525 return;
9526 }
9527
James Smartcb69f7d2011-12-13 13:21:57 -05009528 vport = ndlp->vport;
9529 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9530 "3116 Port generated FCP XRI ABORT event on "
James Smart5c1db2a2012-03-01 22:34:36 -05009531 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
James Smart8e668af2013-05-31 17:04:28 -04009532 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
James Smartcb69f7d2011-12-13 13:21:57 -05009533 bf_get(lpfc_wcqe_xa_xri, axri),
James Smart5c1db2a2012-03-01 22:34:36 -05009534 bf_get(lpfc_wcqe_xa_status, axri),
9535 axri->parameter);
James Smartcb69f7d2011-12-13 13:21:57 -05009536
James Smart5c1db2a2012-03-01 22:34:36 -05009537 /*
9538 * Catch the ABTS protocol failure case. Older OCe FW releases returned
9539 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9540 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9541 */
James Smarte3d2b802012-08-14 14:25:43 -04009542 ext_status = axri->parameter & IOERR_PARAM_MASK;
James Smart5c1db2a2012-03-01 22:34:36 -05009543 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9544 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
James Smartcb69f7d2011-12-13 13:21:57 -05009545 lpfc_sli_abts_recover_port(vport, ndlp);
9546}
9547
James Smarte59058c2008-08-24 21:49:00 -04009548/**
James Smart3621a712009-04-06 18:47:14 -04009549 * lpfc_sli_async_event_handler - ASYNC iocb handler function
James Smarte59058c2008-08-24 21:49:00 -04009550 * @phba: Pointer to HBA context object.
9551 * @pring: Pointer to driver SLI ring object.
9552 * @iocbq: Pointer to iocb object.
9553 *
9554 * This function is called by the slow ring event handler
9555 * function when there is an ASYNC event iocb in the ring.
9556 * This function is called with no lock held.
9557 * Currently this function handles only temperature related
9558 * ASYNC events. The function decodes the temperature sensor
9559 * event message and posts events for the management applications.
9560 **/
James Smart98c9ea52007-10-27 13:37:33 -04009561static void
James Smart57127f12007-10-27 13:37:05 -04009562lpfc_sli_async_event_handler(struct lpfc_hba * phba,
9563 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
9564{
9565 IOCB_t *icmd;
9566 uint16_t evt_code;
James Smart57127f12007-10-27 13:37:05 -04009567 struct temp_event temp_event_data;
9568 struct Scsi_Host *shost;
James Smarta257bf92009-04-06 18:48:10 -04009569 uint32_t *iocb_w;
James Smart57127f12007-10-27 13:37:05 -04009570
9571 icmd = &iocbq->iocb;
9572 evt_code = icmd->un.asyncstat.evt_code;
James Smart57127f12007-10-27 13:37:05 -04009573
James Smartcb69f7d2011-12-13 13:21:57 -05009574 switch (evt_code) {
9575 case ASYNC_TEMP_WARN:
9576 case ASYNC_TEMP_SAFE:
9577 temp_event_data.data = (uint32_t) icmd->ulpContext;
9578 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9579 if (evt_code == ASYNC_TEMP_WARN) {
9580 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9581 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9582 "0347 Adapter is very hot, please take "
9583 "corrective action. temperature : %d Celsius\n",
9584 (uint32_t) icmd->ulpContext);
9585 } else {
9586 temp_event_data.event_code = LPFC_NORMAL_TEMP;
9587 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9588 "0340 Adapter temperature is OK now. "
9589 "temperature : %d Celsius\n",
9590 (uint32_t) icmd->ulpContext);
9591 }
9592
9593 /* Send temperature change event to applications */
9594 shost = lpfc_shost_from_vport(phba->pport);
9595 fc_host_post_vendor_event(shost, fc_get_event_number(),
9596 sizeof(temp_event_data), (char *) &temp_event_data,
9597 LPFC_NL_VENDOR_ID);
9598 break;
9599 case ASYNC_STATUS_CN:
9600 lpfc_sli_abts_err_handler(phba, iocbq);
9601 break;
9602 default:
James Smarta257bf92009-04-06 18:48:10 -04009603 iocb_w = (uint32_t *) icmd;
James Smartcb69f7d2011-12-13 13:21:57 -05009604 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart76bb24e2007-10-27 13:38:00 -04009605 "0346 Ring %d handler: unexpected ASYNC_STATUS"
James Smarte4e74272009-07-19 10:01:38 -04009606 " evt_code 0x%x\n"
James Smarta257bf92009-04-06 18:48:10 -04009607 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
9608 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
9609 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
9610 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
James Smartcb69f7d2011-12-13 13:21:57 -05009611 pring->ringno, icmd->un.asyncstat.evt_code,
James Smarta257bf92009-04-06 18:48:10 -04009612 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9613 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9614 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9615 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9616
James Smartcb69f7d2011-12-13 13:21:57 -05009617 break;
James Smart57127f12007-10-27 13:37:05 -04009618 }
James Smart57127f12007-10-27 13:37:05 -04009619}
9620
9621
James Smarte59058c2008-08-24 21:49:00 -04009622/**
James Smart895427b2017-02-12 13:52:30 -08009623 * lpfc_sli4_setup - SLI ring setup function
James Smarte59058c2008-08-24 21:49:00 -04009624 * @phba: Pointer to HBA context object.
9625 *
 9626 * lpfc_sli4_setup registers the unsolicited receive event handlers
 9627 * on the SLI4 ELS ring. This function is
 9628 * called while the driver attaches to the HBA and before the
9629 * interrupts are enabled. So there is no need for locking.
9630 *
9631 * This function always returns 0.
9632 **/
dea31012005-04-17 16:05:31 -05009633int
James Smart895427b2017-02-12 13:52:30 -08009634lpfc_sli4_setup(struct lpfc_hba *phba)
9635{
9636 struct lpfc_sli_ring *pring;
9637
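	/*
	 * Register the unsolicited receive handlers on the ELS ring: ELS
	 * requests and replies, plus CT (NameServer) requests and replies.
	 */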
9638 pring = phba->sli4_hba.els_wq->pring;
9639 pring->num_mask = LPFC_MAX_RING_MASK;
9640 pring->prt[0].profile = 0; /* Mask 0 */
9641 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9642 pring->prt[0].type = FC_TYPE_ELS;
9643 pring->prt[0].lpfc_sli_rcv_unsol_event =
9644 lpfc_els_unsol_event;
9645 pring->prt[1].profile = 0; /* Mask 1 */
9646 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9647 pring->prt[1].type = FC_TYPE_ELS;
9648 pring->prt[1].lpfc_sli_rcv_unsol_event =
9649 lpfc_els_unsol_event;
9650 pring->prt[2].profile = 0; /* Mask 2 */
9651 /* NameServer Inquiry */
9652 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9653 /* NameServer */
9654 pring->prt[2].type = FC_TYPE_CT;
9655 pring->prt[2].lpfc_sli_rcv_unsol_event =
9656 lpfc_ct_unsol_event;
9657 pring->prt[3].profile = 0; /* Mask 3 */
9658 /* NameServer response */
9659 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9660 /* NameServer */
9661 pring->prt[3].type = FC_TYPE_CT;
9662 pring->prt[3].lpfc_sli_rcv_unsol_event =
9663 lpfc_ct_unsol_event;
9664 return 0;
9665}
9666
9667/**
9668 * lpfc_sli_setup - SLI ring setup function
9669 * @phba: Pointer to HBA context object.
9670 *
9671 * lpfc_sli_setup sets up rings of the SLI interface with
9672 * number of iocbs per ring and iotags. This function is
 9673 * called while the driver attaches to the HBA and before the
9674 * interrupts are enabled. So there is no need for locking.
9675 *
9676 * This function always returns 0. SLI3 only.
9677 **/
9678int
dea31012005-04-17 16:05:31 -05009679lpfc_sli_setup(struct lpfc_hba *phba)
9680{
James Smarted957682007-06-17 19:56:37 -05009681 int i, totiocbsize = 0;
dea31012005-04-17 16:05:31 -05009682 struct lpfc_sli *psli = &phba->sli;
9683 struct lpfc_sli_ring *pring;
9684
James Smart2a76a282012-08-03 12:35:54 -04009685 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
dea31012005-04-17 16:05:31 -05009686 psli->sli_flag = 0;
dea31012005-04-17 16:05:31 -05009687
James Bottomley604a3e32005-10-29 10:28:33 -05009688 psli->iocbq_lookup = NULL;
9689 psli->iocbq_lookup_len = 0;
9690 psli->last_iotag = 0;
9691
dea31012005-04-17 16:05:31 -05009692 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -08009693 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -05009694 switch (i) {
9695 case LPFC_FCP_RING: /* ring 0 - FCP */
9696 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -04009697 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9698 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9699 pring->sli.sli3.numCiocb +=
9700 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9701 pring->sli.sli3.numRiocb +=
9702 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9703 pring->sli.sli3.numCiocb +=
9704 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9705 pring->sli.sli3.numRiocb +=
9706 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9707 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009708 SLI3_IOCB_CMD_SIZE :
9709 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -04009710 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009711 SLI3_IOCB_RSP_SIZE :
9712 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -05009713 pring->iotag_ctr = 0;
9714 pring->iotag_max =
James Smart92d7f7b2007-06-17 19:56:38 -05009715 (phba->cfg_hba_queue_depth * 2);
dea31012005-04-17 16:05:31 -05009716 pring->fast_iotag = pring->iotag_max;
9717 pring->num_mask = 0;
9718 break;
James Smarta4bc3372006-12-02 13:34:16 -05009719 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea31012005-04-17 16:05:31 -05009720 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -04009721 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
9722 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
9723 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009724 SLI3_IOCB_CMD_SIZE :
9725 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -04009726 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009727 SLI3_IOCB_RSP_SIZE :
9728 SLI2_IOCB_RSP_SIZE;
James Smart2e0fef82007-06-17 19:56:36 -05009729 pring->iotag_max = phba->cfg_hba_queue_depth;
dea31012005-04-17 16:05:31 -05009730 pring->num_mask = 0;
9731 break;
9732 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
9733 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -04009734 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
9735 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
9736 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009737 SLI3_IOCB_CMD_SIZE :
9738 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -04009739 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009740 SLI3_IOCB_RSP_SIZE :
9741 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -05009742 pring->fast_iotag = 0;
9743 pring->iotag_ctr = 0;
9744 pring->iotag_max = 4096;
James Smart57127f12007-10-27 13:37:05 -04009745 pring->lpfc_sli_rcv_async_status =
9746 lpfc_sli_async_event_handler;
James Smart6669f9b2009-10-02 15:16:45 -04009747 pring->num_mask = LPFC_MAX_RING_MASK;
dea31012005-04-17 16:05:31 -05009748 pring->prt[0].profile = 0; /* Mask 0 */
James Smart6a9c52c2009-10-02 15:16:51 -04009749 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9750 pring->prt[0].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -05009751 pring->prt[0].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009752 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -05009753 pring->prt[1].profile = 0; /* Mask 1 */
James Smart6a9c52c2009-10-02 15:16:51 -04009754 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9755 pring->prt[1].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -05009756 pring->prt[1].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009757 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -05009758 pring->prt[2].profile = 0; /* Mask 2 */
9759 /* NameServer Inquiry */
James Smart6a9c52c2009-10-02 15:16:51 -04009760 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea31012005-04-17 16:05:31 -05009761 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -04009762 pring->prt[2].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -05009763 pring->prt[2].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009764 lpfc_ct_unsol_event;
dea31012005-04-17 16:05:31 -05009765 pring->prt[3].profile = 0; /* Mask 3 */
9766 /* NameServer response */
James Smart6a9c52c2009-10-02 15:16:51 -04009767 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea31012005-04-17 16:05:31 -05009768 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -04009769 pring->prt[3].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -05009770 pring->prt[3].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009771 lpfc_ct_unsol_event;
dea31012005-04-17 16:05:31 -05009772 break;
9773 }
James Smart7e56aa22012-08-03 12:35:34 -04009774 totiocbsize += (pring->sli.sli3.numCiocb *
9775 pring->sli.sli3.sizeCiocb) +
9776 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea31012005-04-17 16:05:31 -05009777 }
James Smarted957682007-06-17 19:56:37 -05009778 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea31012005-04-17 16:05:31 -05009779 /* Too many cmd / rsp ring entries in SLI2 SLIM */
James Smarte8b62012007-08-02 11:10:09 -04009780 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
9781 "SLI2 SLIM Data: x%x x%lx\n",
9782 phba->brd_no, totiocbsize,
9783 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea31012005-04-17 16:05:31 -05009784 }
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009785 if (phba->cfg_multi_ring_support == 2)
9786 lpfc_extra_ring_setup(phba);
dea31012005-04-17 16:05:31 -05009787
9788 return 0;
9789}
9790
James Smarte59058c2008-08-24 21:49:00 -04009791/**
James Smart895427b2017-02-12 13:52:30 -08009792 * lpfc_sli4_queue_init - Queue initialization function
James Smarte59058c2008-08-24 21:49:00 -04009793 * @phba: Pointer to HBA context object.
9794 *
James Smart895427b2017-02-12 13:52:30 -08009795 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
James Smarte59058c2008-08-24 21:49:00 -04009796 * ring. This function also initializes ring indices of each ring.
9797 * This function is called during the initialization of the SLI
9798 * interface of an HBA.
 9799 * This function is called with no lock held.
9801 **/
James Smart895427b2017-02-12 13:52:30 -08009802void
9803lpfc_sli4_queue_init(struct lpfc_hba *phba)
9804{
9805 struct lpfc_sli *psli;
9806 struct lpfc_sli_ring *pring;
9807 int i;
9808
9809 psli = &phba->sli;
9810 spin_lock_irq(&phba->hbalock);
9811 INIT_LIST_HEAD(&psli->mboxq);
9812 INIT_LIST_HEAD(&psli->mboxq_cmpl);
9813 /* Initialize list headers for txq and txcmplq as double linked lists */
9814 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
9815 pring = phba->sli4_hba.fcp_wq[i]->pring;
9816 pring->flag = 0;
9817 pring->ringno = LPFC_FCP_RING;
9818 INIT_LIST_HEAD(&pring->txq);
9819 INIT_LIST_HEAD(&pring->txcmplq);
9820 INIT_LIST_HEAD(&pring->iocb_continueq);
9821 spin_lock_init(&pring->ring_lock);
9822 }
9823 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
9824 pring = phba->sli4_hba.nvme_wq[i]->pring;
9825 pring->flag = 0;
9826 pring->ringno = LPFC_FCP_RING;
9827 INIT_LIST_HEAD(&pring->txq);
9828 INIT_LIST_HEAD(&pring->txcmplq);
9829 INIT_LIST_HEAD(&pring->iocb_continueq);
9830 spin_lock_init(&pring->ring_lock);
9831 }
9832 pring = phba->sli4_hba.els_wq->pring;
9833 pring->flag = 0;
9834 pring->ringno = LPFC_ELS_RING;
9835 INIT_LIST_HEAD(&pring->txq);
9836 INIT_LIST_HEAD(&pring->txcmplq);
9837 INIT_LIST_HEAD(&pring->iocb_continueq);
9838 spin_lock_init(&pring->ring_lock);
9839
9840 if (phba->cfg_nvme_io_channel) {
9841 pring = phba->sli4_hba.nvmels_wq->pring;
9842 pring->flag = 0;
9843 pring->ringno = LPFC_ELS_RING;
9844 INIT_LIST_HEAD(&pring->txq);
9845 INIT_LIST_HEAD(&pring->txcmplq);
9846 INIT_LIST_HEAD(&pring->iocb_continueq);
9847 spin_lock_init(&pring->ring_lock);
9848 }
9849
9850 if (phba->cfg_fof) {
9851 pring = phba->sli4_hba.oas_wq->pring;
9852 pring->flag = 0;
9853 pring->ringno = LPFC_FCP_RING;
9854 INIT_LIST_HEAD(&pring->txq);
9855 INIT_LIST_HEAD(&pring->txcmplq);
9856 INIT_LIST_HEAD(&pring->iocb_continueq);
9857 spin_lock_init(&pring->ring_lock);
9858 }
9859
9860 spin_unlock_irq(&phba->hbalock);
9861}
9862
9863/**
9864 * lpfc_sli_queue_init - Queue initialization function
9865 * @phba: Pointer to HBA context object.
9866 *
9867 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
9868 * ring. This function also initializes ring indices of each ring.
9869 * This function is called during the initialization of the SLI
9870 * interface of an HBA.
 9871 * This function is called with no lock held.
9873 **/
9874void
9875lpfc_sli_queue_init(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05009876{
9877 struct lpfc_sli *psli;
9878 struct lpfc_sli_ring *pring;
James Bottomley604a3e32005-10-29 10:28:33 -05009879 int i;
dea31012005-04-17 16:05:31 -05009880
9881 psli = &phba->sli;
James Smart2e0fef82007-06-17 19:56:36 -05009882 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05009883 INIT_LIST_HEAD(&psli->mboxq);
James Smart92d7f7b2007-06-17 19:56:38 -05009884 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea31012005-04-17 16:05:31 -05009885 /* Initialize list headers for txq and txcmplq as double linked lists */
9886 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -08009887 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -05009888 pring->ringno = i;
James Smart7e56aa22012-08-03 12:35:34 -04009889 pring->sli.sli3.next_cmdidx = 0;
9890 pring->sli.sli3.local_getidx = 0;
9891 pring->sli.sli3.cmdidx = 0;
dea31012005-04-17 16:05:31 -05009892 INIT_LIST_HEAD(&pring->iocb_continueq);
James Smart9c2face2008-01-11 01:53:18 -05009893 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea31012005-04-17 16:05:31 -05009894 INIT_LIST_HEAD(&pring->postbufq);
James Smart895427b2017-02-12 13:52:30 -08009895 pring->flag = 0;
9896 INIT_LIST_HEAD(&pring->txq);
9897 INIT_LIST_HEAD(&pring->txcmplq);
James Smart7e56aa22012-08-03 12:35:34 -04009898 spin_lock_init(&pring->ring_lock);
dea31012005-04-17 16:05:31 -05009899 }
James Smart2e0fef82007-06-17 19:56:36 -05009900 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05009901}
9902
James Smarte59058c2008-08-24 21:49:00 -04009903/**
James Smart04c68492009-05-22 14:52:52 -04009904 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
9905 * @phba: Pointer to HBA context object.
9906 *
9907 * This routine flushes the mailbox command subsystem. It will unconditionally
9908 * flush all the mailbox commands in the three possible stages in the mailbox
9909 * command sub-system: pending mailbox command queue; the outstanding mailbox
 9910 * command; and completed mailbox command queue. It is the caller's responsibility
9911 * to make sure that the driver is in the proper state to flush the mailbox
9912 * command sub-system. Namely, the posting of mailbox commands into the
9913 * pending mailbox command queue from the various clients must be stopped;
 9914 * either the HBA is in a state that it will never work on the outstanding
9915 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
9916 * mailbox command has been completed.
9917 **/
9918static void
9919lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9920{
9921 LIST_HEAD(completions);
9922 struct lpfc_sli *psli = &phba->sli;
9923 LPFC_MBOXQ_t *pmb;
9924 unsigned long iflag;
9925
9926 /* Flush all the mailbox commands in the mbox system */
9927 spin_lock_irqsave(&phba->hbalock, iflag);
9928 /* The pending mailbox command queue */
9929 list_splice_init(&phba->sli.mboxq, &completions);
9930 /* The outstanding active mailbox command */
9931 if (psli->mbox_active) {
9932 list_add_tail(&psli->mbox_active->list, &completions);
9933 psli->mbox_active = NULL;
9934 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9935 }
9936 /* The completed mailbox command queue */
9937 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
9938 spin_unlock_irqrestore(&phba->hbalock, iflag);
9939
9940 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9941 while (!list_empty(&completions)) {
9942 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9943 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9944 if (pmb->mbox_cmpl)
9945 pmb->mbox_cmpl(phba, pmb);
9946 }
9947}
9948
9949/**
James Smart3621a712009-04-06 18:47:14 -04009950 * lpfc_sli_host_down - Vport cleanup function
James Smarte59058c2008-08-24 21:49:00 -04009951 * @vport: Pointer to virtual port object.
9952 *
9953 * lpfc_sli_host_down is called to clean up the resources
9954 * associated with a vport before destroying virtual
9955 * port data structures.
9956 * This function does following operations:
9957 * - Free discovery resources associated with this virtual
9958 * port.
9959 * - Free iocbs associated with this virtual port in
9960 * the txq.
9961 * - Send abort for all iocb commands associated with this
9962 * vport in txcmplq.
9963 *
9964 * This function is called with no lock held and always returns 1.
9965 **/
dea31012005-04-17 16:05:31 -05009966int
James Smart92d7f7b2007-06-17 19:56:38 -05009967lpfc_sli_host_down(struct lpfc_vport *vport)
9968{
James Smart858c9f62007-06-17 19:56:39 -05009969 LIST_HEAD(completions);
James Smart92d7f7b2007-06-17 19:56:38 -05009970 struct lpfc_hba *phba = vport->phba;
9971 struct lpfc_sli *psli = &phba->sli;
James Smart895427b2017-02-12 13:52:30 -08009972 struct lpfc_queue *qp = NULL;
James Smart92d7f7b2007-06-17 19:56:38 -05009973 struct lpfc_sli_ring *pring;
9974 struct lpfc_iocbq *iocb, *next_iocb;
James Smart92d7f7b2007-06-17 19:56:38 -05009975 int i;
9976 unsigned long flags = 0;
9977 uint16_t prev_pring_flag;
9978
9979 lpfc_cleanup_discovery_resources(vport);
9980
9981 spin_lock_irqsave(&phba->hbalock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -05009982
James Smart895427b2017-02-12 13:52:30 -08009983 /*
9984 * Error everything on the txq since these iocbs
9985 * have not been given to the FW yet.
9986 * Also issue ABTS for everything on the txcmplq
9987 */
9988 if (phba->sli_rev != LPFC_SLI_REV4) {
9989 for (i = 0; i < psli->num_rings; i++) {
9990 pring = &psli->sli3_ring[i];
9991 prev_pring_flag = pring->flag;
9992 /* Only slow rings */
9993 if (pring->ringno == LPFC_ELS_RING) {
9994 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9995 /* Set the lpfc data pending flag */
9996 set_bit(LPFC_DATA_READY, &phba->data_flags);
9997 }
9998 list_for_each_entry_safe(iocb, next_iocb,
9999 &pring->txq, list) {
10000 if (iocb->vport != vport)
10001 continue;
10002 list_move_tail(&iocb->list, &completions);
10003 }
10004 list_for_each_entry_safe(iocb, next_iocb,
10005 &pring->txcmplq, list) {
10006 if (iocb->vport != vport)
10007 continue;
10008 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10009 }
10010 pring->flag = prev_pring_flag;
James Smart92d7f7b2007-06-17 19:56:38 -050010011 }
James Smart895427b2017-02-12 13:52:30 -080010012 } else {
10013 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10014 pring = qp->pring;
10015 if (!pring)
10016 continue;
10017 if (pring == phba->sli4_hba.els_wq->pring) {
10018 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10019 /* Set the lpfc data pending flag */
10020 set_bit(LPFC_DATA_READY, &phba->data_flags);
10021 }
10022 prev_pring_flag = pring->flag;
10023 spin_lock_irq(&pring->ring_lock);
10024 list_for_each_entry_safe(iocb, next_iocb,
10025 &pring->txq, list) {
10026 if (iocb->vport != vport)
10027 continue;
10028 list_move_tail(&iocb->list, &completions);
10029 }
10030 spin_unlock_irq(&pring->ring_lock);
10031 list_for_each_entry_safe(iocb, next_iocb,
10032 &pring->txcmplq, list) {
10033 if (iocb->vport != vport)
10034 continue;
10035 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10036 }
10037 pring->flag = prev_pring_flag;
10038 }
James Smart92d7f7b2007-06-17 19:56:38 -050010039 }
James Smart92d7f7b2007-06-17 19:56:38 -050010040 spin_unlock_irqrestore(&phba->hbalock, flags);
10041
James Smarta257bf92009-04-06 18:48:10 -040010042 /* Cancel all the IOCBs from the completions list */
10043 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10044 IOERR_SLI_DOWN);
James Smart92d7f7b2007-06-17 19:56:38 -050010045 return 1;
10046}
10047
James Smarte59058c2008-08-24 21:49:00 -040010048/**
James Smart3621a712009-04-06 18:47:14 -040010049 * lpfc_sli_hba_down - Resource cleanup function for the HBA
James Smarte59058c2008-08-24 21:49:00 -040010050 * @phba: Pointer to HBA context object.
10051 *
 10052 * This function cleans up all iocbs, buffers, and mailbox commands
10053 * while shutting down the HBA. This function is called with no
10054 * lock held and always returns 1.
10055 * This function does the following to cleanup driver resources:
10056 * - Free discovery resources for each virtual port
10057 * - Cleanup any pending fabric iocbs
10058 * - Iterate through the iocb txq and free each entry
10059 * in the list.
10060 * - Free up any buffer posted to the HBA
10061 * - Free mailbox commands in the mailbox queue.
10062 **/
James Smart92d7f7b2007-06-17 19:56:38 -050010063int
James Smart2e0fef82007-06-17 19:56:36 -050010064lpfc_sli_hba_down(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -050010065{
James Smart2534ba72007-04-25 09:52:20 -040010066 LIST_HEAD(completions);
James Smart2e0fef82007-06-17 19:56:36 -050010067 struct lpfc_sli *psli = &phba->sli;
James Smart895427b2017-02-12 13:52:30 -080010068 struct lpfc_queue *qp = NULL;
dea31012005-04-17 16:05:31 -050010069 struct lpfc_sli_ring *pring;
James Smart0ff10d42008-01-11 01:52:36 -050010070 struct lpfc_dmabuf *buf_ptr;
dea31012005-04-17 16:05:31 -050010071 unsigned long flags = 0;
James Smart04c68492009-05-22 14:52:52 -040010072 int i;
10073
10074 /* Shutdown the mailbox command sub-system */
James Smart618a5232012-06-12 13:54:36 -040010075 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea31012005-04-17 16:05:31 -050010076
dea31012005-04-17 16:05:31 -050010077 lpfc_hba_down_prep(phba);
10078
James Smart92d7f7b2007-06-17 19:56:38 -050010079 lpfc_fabric_abort_hba(phba);
10080
James Smart2e0fef82007-06-17 19:56:36 -050010081 spin_lock_irqsave(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -050010082
James Smart895427b2017-02-12 13:52:30 -080010083 /*
10084 * Error everything on the txq since these iocbs
10085 * have not been given to the FW yet.
10086 */
10087 if (phba->sli_rev != LPFC_SLI_REV4) {
10088 for (i = 0; i < psli->num_rings; i++) {
10089 pring = &psli->sli3_ring[i];
10090 /* Only slow rings */
10091 if (pring->ringno == LPFC_ELS_RING) {
10092 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10093 /* Set the lpfc data pending flag */
10094 set_bit(LPFC_DATA_READY, &phba->data_flags);
10095 }
10096 list_splice_init(&pring->txq, &completions);
10097 }
10098 } else {
10099 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10100 pring = qp->pring;
10101 if (!pring)
10102 continue;
10103 spin_lock_irq(&pring->ring_lock);
10104 list_splice_init(&pring->txq, &completions);
10105 spin_unlock_irq(&pring->ring_lock);
10106 if (pring == phba->sli4_hba.els_wq->pring) {
10107 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10108 /* Set the lpfc data pending flag */
10109 set_bit(LPFC_DATA_READY, &phba->data_flags);
10110 }
10111 }
dea31012005-04-17 16:05:31 -050010112 }
James Smart2e0fef82007-06-17 19:56:36 -050010113 spin_unlock_irqrestore(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -050010114
James Smarta257bf92009-04-06 18:48:10 -040010115 /* Cancel all the IOCBs from the completions list */
10116 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10117 IOERR_SLI_DOWN);
James Smart2534ba72007-04-25 09:52:20 -040010118
James Smart0ff10d42008-01-11 01:52:36 -050010119 spin_lock_irqsave(&phba->hbalock, flags);
10120 list_splice_init(&phba->elsbuf, &completions);
10121 phba->elsbuf_cnt = 0;
10122 phba->elsbuf_prev_cnt = 0;
10123 spin_unlock_irqrestore(&phba->hbalock, flags);
10124
10125 while (!list_empty(&completions)) {
10126 list_remove_head(&completions, buf_ptr,
10127 struct lpfc_dmabuf, list);
10128 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10129 kfree(buf_ptr);
10130 }
10131
dea31012005-04-17 16:05:31 -050010132 /* Return any active mbox cmds */
10133 del_timer_sync(&psli->mbox_tmo);
James Smart92d7f7b2007-06-17 19:56:38 -050010134
James Smartda0436e2009-05-22 14:51:39 -040010135 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -050010136 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
James Smartda0436e2009-05-22 14:51:39 -040010137 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -050010138
James Smartda0436e2009-05-22 14:51:39 -040010139 return 1;
10140}
James Smart92d7f7b2007-06-17 19:56:38 -050010141
James Smartda0436e2009-05-22 14:51:39 -040010142/**
James Smart3621a712009-04-06 18:47:14 -040010143 * lpfc_sli_pcimem_bcopy - SLI memory copy function
James Smarte59058c2008-08-24 21:49:00 -040010144 * @srcp: Source memory pointer.
10145 * @destp: Destination memory pointer.
 10146 * @cnt: Number of bytes to copy (processed one 32-bit word at a time).
10147 *
10148 * This function is used for copying data between driver memory
10149 * and the SLI memory. This function also changes the endianness
10150 * of each word if native endianness is different from SLI
10151 * endianness. This function can be called with or without
10152 * lock.
10153 **/
dea31012005-04-17 16:05:31 -050010154void
10155lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10156{
10157 uint32_t *src = srcp;
10158 uint32_t *dest = destp;
10159 uint32_t ldata;
10160 int i;
10161
10162 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10163 ldata = *src;
10164 ldata = le32_to_cpu(ldata);
10165 *dest = ldata;
10166 src++;
10167 dest++;
10168 }
10169}
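/*
 * Illustrative usage only: a caller copying a mailbox command into the
 * host-resident mailbox area might do something like
 *
 *	lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
 *
 * (names here follow the driver's mailbox path but are shown only as an
 * example). The cnt argument is a byte count; each 32-bit word is
 * le32-converted so the layout stays correct on big-endian hosts.
 */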
10170
James Smarte59058c2008-08-24 21:49:00 -040010171
10172/**
James Smarta0c87cb2009-07-19 10:01:10 -040010173 * lpfc_sli_bemem_bcopy - SLI memory copy function
10174 * @srcp: Source memory pointer.
10175 * @destp: Destination memory pointer.
10176 * @cnt: Number of bytes to be copied, processed one 32-bit word at a time.
10177 *
10178 * This function is used for copying data from a data structure
10179 * with big endian representation to local endianness.
10180 * This function can be called with or without a lock held.
10181 **/
10182void
10183lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10184{
10185 uint32_t *src = srcp;
10186 uint32_t *dest = destp;
10187 uint32_t ldata;
10188 int i;
10189
10190 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10191 ldata = *src;
10192 ldata = be32_to_cpu(ldata);
10193 *dest = ldata;
10194 src++;
10195 dest++;
10196 }
10197}
10198
10199/**
James Smart3621a712009-04-06 18:47:14 -040010200 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
James Smarte59058c2008-08-24 21:49:00 -040010201 * @phba: Pointer to HBA context object.
10202 * @pring: Pointer to driver SLI ring object.
10203 * @mp: Pointer to driver buffer object.
10204 *
10205 * This function is called with no lock held.
10206 * It always returns zero after adding the buffer to the postbufq
10207 * buffer list.
10208 **/
dea31012005-04-17 16:05:31 -050010209int
James Smart2e0fef82007-06-17 19:56:36 -050010210lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10211 struct lpfc_dmabuf *mp)
dea31012005-04-17 16:05:31 -050010212{
10213 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10214 later */
James Smart2e0fef82007-06-17 19:56:36 -050010215 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010216 list_add_tail(&mp->list, &pring->postbufq);
dea31012005-04-17 16:05:31 -050010217 pring->postbufq_cnt++;
James Smart2e0fef82007-06-17 19:56:36 -050010218 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010219 return 0;
10220}
10221
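/*
 * Usage sketch (illustrative only, not part of the driver): post a
 * driver-owned DMA buffer so it can later be looked up by its physical
 * address with lpfc_sli_ringpostbuf_get(). Error handling is omitted,
 * and the lpfc_mbuf_alloc() call is assumed to be the usual way such
 * buffers are obtained.
 *
 *	struct lpfc_dmabuf *mp;
 *
 *	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
 *	if (mp) {
 *		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
 *		if (mp->virt)
 *			lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	}
 */
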
James Smarte59058c2008-08-24 21:49:00 -040010222/**
James Smart3621a712009-04-06 18:47:14 -040010223 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
James Smarte59058c2008-08-24 21:49:00 -040010224 * @phba: Pointer to HBA context object.
10225 *
10226 * When HBQ is enabled, buffers are searched based on tags. This function
10227 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
10228 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10229 * does not conflict with tags of buffer posted for unsolicited events.
10230 * The function returns the allocated tag. The function is called with
10231 * no locks held.
10232 **/
James Smart76bb24e2007-10-27 13:38:00 -040010233uint32_t
10234lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10235{
10236 spin_lock_irq(&phba->hbalock);
10237 phba->buffer_tag_count++;
10238 /*
10239	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10240	 * from a tag assigned by HBQ.
10241 */
10242 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10243 spin_unlock_irq(&phba->hbalock);
10244 return phba->buffer_tag_count;
10245}
10246
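/*
 * Usage sketch (illustrative only, not part of the driver): tag a
 * buffer before it is posted with a CMD_QUE_XRI64_CX iocb, then
 * recover it later using the tag returned in the CMD_IOCB_RET_XRI64_CX
 * response. "mp" and "tag" are hypothetical variables.
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *
 *	(later, in the response path)
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */
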
James Smarte59058c2008-08-24 21:49:00 -040010247/**
James Smart3621a712009-04-06 18:47:14 -040010248 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
James Smarte59058c2008-08-24 21:49:00 -040010249 * @phba: Pointer to HBA context object.
10250 * @pring: Pointer to driver SLI ring object.
10251 * @tag: Buffer tag.
10252 *
10253 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10254 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
10255 * iocb is posted to the response ring with the tag of the buffer.
10256 * This function searches the pring->postbufq list using the tag
10257 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10258 * iocb. If the buffer is found, then the lpfc_dmabuf object of the
10259 * buffer is returned to the caller; else NULL is returned.
10260 * This function is called with no lock held.
10261 **/
James Smart76bb24e2007-10-27 13:38:00 -040010262struct lpfc_dmabuf *
10263lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10264 uint32_t tag)
10265{
10266 struct lpfc_dmabuf *mp, *next_mp;
10267 struct list_head *slp = &pring->postbufq;
10268
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010269 /* Search postbufq, from the beginning, looking for a match on tag */
James Smart76bb24e2007-10-27 13:38:00 -040010270 spin_lock_irq(&phba->hbalock);
10271 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10272 if (mp->buffer_tag == tag) {
10273 list_del_init(&mp->list);
10274 pring->postbufq_cnt--;
10275 spin_unlock_irq(&phba->hbalock);
10276 return mp;
10277 }
10278 }
10279
10280 spin_unlock_irq(&phba->hbalock);
10281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartd7c255b2008-08-24 21:50:00 -040010282 "0402 Cannot find virtual addr for buffer tag on "
James Smart76bb24e2007-10-27 13:38:00 -040010283 "ring %d Data x%lx x%p x%p x%x\n",
10284 pring->ringno, (unsigned long) tag,
10285 slp->next, slp->prev, pring->postbufq_cnt);
10286
10287 return NULL;
10288}
dea31012005-04-17 16:05:31 -050010289
James Smarte59058c2008-08-24 21:49:00 -040010290/**
James Smart3621a712009-04-06 18:47:14 -040010291 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
James Smarte59058c2008-08-24 21:49:00 -040010292 * @phba: Pointer to HBA context object.
10293 * @pring: Pointer to driver SLI ring object.
10294 * @phys: DMA address of the buffer.
10295 *
10296 * This function searches the buffer list using the dma_address
10297 * of unsolicited event to find the driver's lpfc_dmabuf object
10298 * corresponding to the dma_address. The function returns the
10299 * lpfc_dmabuf object if a buffer is found else it returns NULL.
10300 * This function is called by the ct and els unsolicited event
10301 * handlers to get the buffer associated with the unsolicited
10302 * event.
10303 *
10304 * This function is called with no lock held.
10305 **/
dea31012005-04-17 16:05:31 -050010306struct lpfc_dmabuf *
10307lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10308 dma_addr_t phys)
10309{
10310 struct lpfc_dmabuf *mp, *next_mp;
10311 struct list_head *slp = &pring->postbufq;
10312
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010313 /* Search postbufq, from the beginning, looking for a match on phys */
James Smart2e0fef82007-06-17 19:56:36 -050010314 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010315 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10316 if (mp->phys == phys) {
10317 list_del_init(&mp->list);
10318 pring->postbufq_cnt--;
James Smart2e0fef82007-06-17 19:56:36 -050010319 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010320 return mp;
10321 }
10322 }
10323
James Smart2e0fef82007-06-17 19:56:36 -050010324 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -040010326 "0410 Cannot find virtual addr for mapped buf on "
dea31012005-04-17 16:05:31 -050010327 "ring %d Data x%llx x%p x%p x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040010328 pring->ringno, (unsigned long long)phys,
dea31012005-04-17 16:05:31 -050010329 slp->next, slp->prev, pring->postbufq_cnt);
10330 return NULL;
10331}
10332
James Smarte59058c2008-08-24 21:49:00 -040010333/**
James Smart3621a712009-04-06 18:47:14 -040010334 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
James Smarte59058c2008-08-24 21:49:00 -040010335 * @phba: Pointer to HBA context object.
10336 * @cmdiocb: Pointer to driver command iocb object.
10337 * @rspiocb: Pointer to driver response iocb object.
10338 *
10339 * This function is the completion handler for the abort iocbs for
10340 * ELS commands. This function is called from the ELS ring event
10341 * handler with no lock held. This function frees memory resources
10342 * associated with the abort iocb.
10343 **/
dea31012005-04-17 16:05:31 -050010344static void
James Smart2e0fef82007-06-17 19:56:36 -050010345lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10346 struct lpfc_iocbq *rspiocb)
dea31012005-04-17 16:05:31 -050010347{
James Smart2e0fef82007-06-17 19:56:36 -050010348 IOCB_t *irsp = &rspiocb->iocb;
James Smart2680eea2007-04-25 09:52:55 -040010349 uint16_t abort_iotag, abort_context;
James Smartff78d8f2011-12-13 13:21:35 -050010350 struct lpfc_iocbq *abort_iocb = NULL;
James Smart2680eea2007-04-25 09:52:55 -040010351
10352 if (irsp->ulpStatus) {
James Smartff78d8f2011-12-13 13:21:35 -050010353
10354 /*
10355 * Assume that the port already completed and returned, or
10356 * will return the iocb. Just log the message.
10357 */
James Smart2680eea2007-04-25 09:52:55 -040010358 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
10359 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
10360
James Smart2e0fef82007-06-17 19:56:36 -050010361 spin_lock_irq(&phba->hbalock);
James Smart45ed1192009-10-02 15:17:02 -040010362 if (phba->sli_rev < LPFC_SLI_REV4) {
10363 if (abort_iotag != 0 &&
10364 abort_iotag <= phba->sli.last_iotag)
10365 abort_iocb =
10366 phba->sli.iocbq_lookup[abort_iotag];
10367 } else
10368 /* For sli4 the abort_tag is the XRI,
10369 * so the abort routine puts the iotag of the iocb
10370 * being aborted in the context field of the abort
10371 * IOCB.
10372 */
10373 abort_iocb = phba->sli.iocbq_lookup[abort_context];
James Smart2680eea2007-04-25 09:52:55 -040010374
James Smart2a9bf3d2010-06-07 15:24:45 -040010375 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10376 "0327 Cannot abort els iocb %p "
10377 "with tag %x context %x, abort status %x, "
10378 "abort code %x\n",
10379 abort_iocb, abort_iotag, abort_context,
10380 irsp->ulpStatus, irsp->un.ulpWord[4]);
James Smart2680eea2007-04-25 09:52:55 -040010381
James Smartff78d8f2011-12-13 13:21:35 -050010382 spin_unlock_irq(&phba->hbalock);
James Smart2680eea2007-04-25 09:52:55 -040010383 }
James Bottomley604a3e32005-10-29 10:28:33 -050010384 lpfc_sli_release_iocbq(phba, cmdiocb);
dea31012005-04-17 16:05:31 -050010385 return;
10386}
10387
James Smarte59058c2008-08-24 21:49:00 -040010388/**
James Smart3621a712009-04-06 18:47:14 -040010389 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
James Smarte59058c2008-08-24 21:49:00 -040010390 * @phba: Pointer to HBA context object.
10391 * @cmdiocb: Pointer to driver command iocb object.
10392 * @rspiocb: Pointer to driver response iocb object.
10393 *
10394 * The function is called from SLI ring event handler with no
10395 * lock held. This function is the completion handler for ELS commands
10396 * which are aborted. The function frees memory resources used for
10397 * the aborted ELS commands.
10398 **/
James Smart92d7f7b2007-06-17 19:56:38 -050010399static void
10400lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10401 struct lpfc_iocbq *rspiocb)
10402{
10403 IOCB_t *irsp = &rspiocb->iocb;
10404
10405 /* ELS cmd tag <ulpIoTag> completes */
10406 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smartd7c255b2008-08-24 21:50:00 -040010407 "0139 Ignoring ELS cmd tag x%x completion Data: "
James Smart92d7f7b2007-06-17 19:56:38 -050010408 "x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040010409 irsp->ulpIoTag, irsp->ulpStatus,
James Smart92d7f7b2007-06-17 19:56:38 -050010410 irsp->un.ulpWord[4], irsp->ulpTimeout);
James Smart858c9f62007-06-17 19:56:39 -050010411 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10412 lpfc_ct_free_iocb(phba, cmdiocb);
10413 else
10414 lpfc_els_free_iocb(phba, cmdiocb);
James Smart92d7f7b2007-06-17 19:56:38 -050010415 return;
10416}
10417
James Smarte59058c2008-08-24 21:49:00 -040010418/**
James Smart5af5eee2010-10-22 11:06:38 -040010419 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
James Smarte59058c2008-08-24 21:49:00 -040010420 * @phba: Pointer to HBA context object.
10421 * @pring: Pointer to driver SLI ring object.
10422 * @cmdiocb: Pointer to driver command iocb object.
10423 *
James Smart5af5eee2010-10-22 11:06:38 -040010424 * This function issues an abort iocb for the provided command iocb down to
10425 * the port. Unless the outstanding command iocb is itself an abort
10426 * request, this function issues the abort unconditionally. This function is
10427 * called with hbalock held. The function returns 0 when it fails due to
10428 * memory allocation failure or when the command iocb is an abort request.
James Smarte59058c2008-08-24 21:49:00 -040010429 **/
James Smart5af5eee2010-10-22 11:06:38 -040010430static int
10431lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -050010432 struct lpfc_iocbq *cmdiocb)
dea31012005-04-17 16:05:31 -050010433{
James Smart2e0fef82007-06-17 19:56:36 -050010434 struct lpfc_vport *vport = cmdiocb->vport;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010435 struct lpfc_iocbq *abtsiocbp;
dea31012005-04-17 16:05:31 -050010436 IOCB_t *icmd = NULL;
10437 IOCB_t *iabt = NULL;
James Smart5af5eee2010-10-22 11:06:38 -040010438 int retval;
James Smart7e56aa22012-08-03 12:35:34 -040010439 unsigned long iflags;
James Smart07951072007-04-25 09:51:38 -040010440
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +010010441 lockdep_assert_held(&phba->hbalock);
10442
James Smart92d7f7b2007-06-17 19:56:38 -050010443 /*
10444 * There are certain command types we don't want to abort. And we
10445 * don't want to abort commands that are already in the process of
10446 * being aborted.
James Smart07951072007-04-25 09:51:38 -040010447 */
10448 icmd = &cmdiocb->iocb;
James Smart2e0fef82007-06-17 19:56:36 -050010449 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
James Smart92d7f7b2007-06-17 19:56:38 -050010450 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10451 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
James Smart07951072007-04-25 09:51:38 -040010452 return 0;
10453
dea31012005-04-17 16:05:31 -050010454 /* issue ABTS for this IOCB based on iotag */
James Smart92d7f7b2007-06-17 19:56:38 -050010455 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -050010456 if (abtsiocbp == NULL)
10457 return 0;
dea31012005-04-17 16:05:31 -050010458
James Smart07951072007-04-25 09:51:38 -040010459 /* This signals the response to set the correct status
James Smart341af102010-01-26 23:07:37 -050010460 * before calling the completion handler
James Smart07951072007-04-25 09:51:38 -040010461 */
10462 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10463
dea31012005-04-17 16:05:31 -050010464 iabt = &abtsiocbp->iocb;
James Smart07951072007-04-25 09:51:38 -040010465 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10466 iabt->un.acxri.abortContextTag = icmd->ulpContext;
James Smart45ed1192009-10-02 15:17:02 -040010467 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -040010468 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
James Smart45ed1192009-10-02 15:17:02 -040010469 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10470 }
James Smartda0436e2009-05-22 14:51:39 -040010471 else
10472 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
dea31012005-04-17 16:05:31 -050010473 iabt->ulpLe = 1;
James Smart07951072007-04-25 09:51:38 -040010474 iabt->ulpClass = icmd->ulpClass;
dea31012005-04-17 16:05:31 -050010475
James Smart5ffc2662009-11-18 15:39:44 -050010476 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080010477 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
James Smart341af102010-01-26 23:07:37 -050010478 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10479 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040010480 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10481 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
James Smart5ffc2662009-11-18 15:39:44 -050010482
James Smart2e0fef82007-06-17 19:56:36 -050010483 if (phba->link_state >= LPFC_LINK_UP)
James Smart07951072007-04-25 09:51:38 -040010484 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10485 else
10486 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10487
10488 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
James Smarte6c6acc2016-12-19 15:07:23 -080010489 abtsiocbp->vport = vport;
James Smart5b8bd0c2007-04-25 09:52:49 -040010490
James Smarte8b62012007-08-02 11:10:09 -040010491 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10492 "0339 Abort xri x%x, original iotag x%x, "
10493 "abort cmd iotag x%x\n",
James Smart2a9bf3d2010-06-07 15:24:45 -040010494 iabt->un.acxri.abortIoTag,
James Smarte8b62012007-08-02 11:10:09 -040010495 iabt->un.acxri.abortContextTag,
James Smart2a9bf3d2010-06-07 15:24:45 -040010496 abtsiocbp->iotag);
James Smart7e56aa22012-08-03 12:35:34 -040010497
10498 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -080010499 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10500 if (unlikely(pring == NULL))
James Smart9bd2bff52014-09-03 12:57:30 -040010501 return 0;
James Smart7e56aa22012-08-03 12:35:34 -040010502 /* Note: both hbalock and ring_lock need to be set here */
10503 spin_lock_irqsave(&pring->ring_lock, iflags);
10504 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10505 abtsiocbp, 0);
10506 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10507 } else {
10508 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10509 abtsiocbp, 0);
10510 }
James Smart07951072007-04-25 09:51:38 -040010511
James Smartd7c255b2008-08-24 21:50:00 -040010512 if (retval)
10513 __lpfc_sli_release_iocbq(phba, abtsiocbp);
James Smart5af5eee2010-10-22 11:06:38 -040010514
10515 /*
10516 * Caller to this routine should check for IOCB_ERROR
10517 * and handle it properly. This routine no longer removes
10518 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10519 */
10520 return retval;
10521}
10522
10523/**
10524 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10525 * @phba: Pointer to HBA context object.
10526 * @pring: Pointer to driver SLI ring object.
10527 * @cmdiocb: Pointer to driver command iocb object.
10528 *
10529 * This function issues an abort iocb for the provided command iocb. In case
10530 * of unloading, the abort iocb will not be issued to commands on the ELS
10531 * ring. Instead, the callback function shall be changed for those commands
10532 * so that nothing happens when they finish. This function is called with
10533 * hbalock held. The function returns 0 when the command iocb is an abort
10534 * request.
10535 **/
10536int
10537lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10538 struct lpfc_iocbq *cmdiocb)
10539{
10540 struct lpfc_vport *vport = cmdiocb->vport;
10541 int retval = IOCB_ERROR;
10542 IOCB_t *icmd = NULL;
10543
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +010010544 lockdep_assert_held(&phba->hbalock);
10545
James Smart5af5eee2010-10-22 11:06:38 -040010546 /*
10547 * There are certain command types we don't want to abort. And we
10548 * don't want to abort commands that are already in the process of
10549 * being aborted.
10550 */
10551 icmd = &cmdiocb->iocb;
10552 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10553 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10554 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10555 return 0;
10556
10557 /*
10558 * If we're unloading, don't abort iocb on the ELS ring, but change
10559 * the callback so that nothing happens when it finishes.
10560 */
10561 if ((vport->load_flag & FC_UNLOADING) &&
10562 (pring->ringno == LPFC_ELS_RING)) {
10563 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10564 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10565 else
10566 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10567 goto abort_iotag_exit;
10568 }
10569
10570 /* Now, we try to issue the abort to the cmdiocb out */
10571 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10572
James Smart07951072007-04-25 09:51:38 -040010573abort_iotag_exit:
James Smart2e0fef82007-06-17 19:56:36 -050010574 /*
10575 * Caller to this routine should check for IOCB_ERROR
10576 * and handle it properly. This routine no longer removes
10577 * iocb off txcmplq and call compl in case of IOCB_ERROR.
James Smart07951072007-04-25 09:51:38 -040010578 */
James Smart2e0fef82007-06-17 19:56:36 -050010579 return retval;
dea31012005-04-17 16:05:31 -050010580}
10581
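/*
 * Usage sketch (illustrative only, not part of the driver): the abort
 * helpers above expect the caller to hold hbalock, so a typical caller
 * wraps the call as shown. "iocb" is a hypothetical outstanding
 * command iocb on the given ring.
 *
 *	unsigned long iflags;
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */
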
James Smarte59058c2008-08-24 21:49:00 -040010582/**
James Smart895427b2017-02-12 13:52:30 -080010583 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10584 * @phba: Pointer to HBA context object.
10585 * @pring: Pointer to driver SLI ring object.
10586 * @cmdiocb: Pointer to driver command iocb object.
10587 *
10588 * This function issues an abort iocb for the provided command iocb down to
10589 * the port. Unless the outstanding command iocb is itself an abort
10590 * request, this function issues the abort unconditionally. This function is
10591 * called with hbalock held. The function returns 0 when it fails due to
10592 * memory allocation failure or when the command iocb is an abort request.
10593 **/
10594static int
10595lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10596 struct lpfc_iocbq *cmdiocb)
10597{
10598 struct lpfc_vport *vport = cmdiocb->vport;
10599 struct lpfc_iocbq *abtsiocbp;
10600 union lpfc_wqe *abts_wqe;
10601 int retval;
10602
10603 /*
10604 * There are certain command types we don't want to abort. And we
10605 * don't want to abort commands that are already in the process of
10606 * being aborted.
10607 */
10608 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10609 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10610 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10611 return 0;
10612
10613 /* issue ABTS for this io based on iotag */
10614 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10615 if (abtsiocbp == NULL)
10616 return 0;
10617
10618 /* This signals the response to set the correct status
10619 * before calling the completion handler
10620 */
10621 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10622
10623 /* Complete prepping the abort wqe and issue to the FW. */
10624 abts_wqe = &abtsiocbp->wqe;
10625 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
10626 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
10627
10628 /* Explicitly set reserved fields to zero.*/
10629 abts_wqe->abort_cmd.rsrvd4 = 0;
10630 abts_wqe->abort_cmd.rsrvd5 = 0;
10631
10632 /* WQE Common - word 6. Context is XRI tag. Set 0. */
10633 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10634 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10635
10636 /* word 7 */
10637 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
10638 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10639 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
10640 cmdiocb->iocb.ulpClass);
10641
10642 /* word 8 - tell the FW to abort the IO associated with this
10643 * outstanding exchange ID.
10644 */
10645 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
10646
10647 /* word 9 - this is the iotag for the abts_wqe completion. */
10648 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
10649 abtsiocbp->iotag);
10650
10651 /* word 10 */
10652 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
10653 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
10654 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
10655
10656 /* word 11 */
10657 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10658 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
10659 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10660
10661 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10662 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
10663 abtsiocbp->vport = vport;
James Smart01649562017-02-12 13:52:32 -080010664 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
James Smart895427b2017-02-12 13:52:30 -080010665 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
10666 if (retval == IOCB_ERROR) {
10667 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10668 "6147 Failed abts issue_wqe with status x%x "
10669 "for oxid x%x\n",
10670 retval, cmdiocb->sli4_xritag);
10671 lpfc_sli_release_iocbq(phba, abtsiocbp);
10672 return retval;
10673 }
10674
10675 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10676 "6148 Drv Abort NVME Request Issued for "
10677 "ox_id x%x on reqtag x%x\n",
10678 cmdiocb->sli4_xritag,
10679 abtsiocbp->iotag);
10680
10681 return retval;
10682}
10683
10684/**
James Smart5af5eee2010-10-22 11:06:38 -040010685 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10686 * @phba: pointer to lpfc HBA data structure.
10687 *
10688 * This routine will abort all pending and outstanding iocbs to an HBA.
10689 **/
10690void
10691lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
10692{
10693 struct lpfc_sli *psli = &phba->sli;
10694 struct lpfc_sli_ring *pring;
James Smart895427b2017-02-12 13:52:30 -080010695 struct lpfc_queue *qp = NULL;
James Smart5af5eee2010-10-22 11:06:38 -040010696 int i;
10697
James Smart895427b2017-02-12 13:52:30 -080010698 if (phba->sli_rev != LPFC_SLI_REV4) {
10699 for (i = 0; i < psli->num_rings; i++) {
10700 pring = &psli->sli3_ring[i];
10701 lpfc_sli_abort_iocb_ring(phba, pring);
10702 }
10703 return;
10704 }
10705 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10706 pring = qp->pring;
10707 if (!pring)
10708 continue;
James Smartdb55fba2014-04-04 13:52:02 -040010709 lpfc_sli_abort_iocb_ring(phba, pring);
James Smart5af5eee2010-10-22 11:06:38 -040010710 }
10711}
10712
10713/**
James Smart3621a712009-04-06 18:47:14 -040010714 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
James Smarte59058c2008-08-24 21:49:00 -040010715 * @iocbq: Pointer to driver iocb object.
10716 * @vport: Pointer to driver virtual port object.
10717 * @tgt_id: SCSI ID of the target.
10718 * @lun_id: LUN ID of the scsi device.
10719 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
10720 *
James Smart3621a712009-04-06 18:47:14 -040010721 * This function acts as an iocb filter for functions which abort or count
James Smarte59058c2008-08-24 21:49:00 -040010722 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
10723 * 0 if the filtering criteria are met for the given iocb and will return
10724 * 1 if the filtering criteria are not met.
10725 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
10726 * given iocb is for the SCSI device specified by vport, tgt_id and
10727 * lun_id parameter.
10728 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
10729 * given iocb is for the SCSI target specified by vport and tgt_id
10730 * parameters.
10731 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
10732 * given iocb is for the SCSI host associated with the given vport.
10733 * This function is called with no locks held.
10734 **/
dea31012005-04-17 16:05:31 -050010735static int
James Smart51ef4c22007-08-02 11:10:31 -040010736lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
10737 uint16_t tgt_id, uint64_t lun_id,
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010738 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050010739{
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010740 struct lpfc_scsi_buf *lpfc_cmd;
dea31012005-04-17 16:05:31 -050010741 int rc = 1;
10742
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010743 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
10744 return rc;
10745
James Smart51ef4c22007-08-02 11:10:31 -040010746 if (iocbq->vport != vport)
10747 return rc;
10748
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010749 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010750
James Smart495a7142008-06-14 22:52:59 -040010751 if (lpfc_cmd->pCmd == NULL)
dea31012005-04-17 16:05:31 -050010752 return rc;
10753
10754 switch (ctx_cmd) {
10755 case LPFC_CTX_LUN:
James Smart495a7142008-06-14 22:52:59 -040010756 if ((lpfc_cmd->rdata->pnode) &&
10757 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10758 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea31012005-04-17 16:05:31 -050010759 rc = 0;
10760 break;
10761 case LPFC_CTX_TGT:
James Smart495a7142008-06-14 22:52:59 -040010762 if ((lpfc_cmd->rdata->pnode) &&
10763 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea31012005-04-17 16:05:31 -050010764 rc = 0;
10765 break;
dea31012005-04-17 16:05:31 -050010766 case LPFC_CTX_HOST:
10767 rc = 0;
10768 break;
10769 default:
10770 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -070010771 __func__, ctx_cmd);
dea31012005-04-17 16:05:31 -050010772 break;
10773 }
10774
10775 return rc;
10776}
10777
James Smarte59058c2008-08-24 21:49:00 -040010778/**
James Smart3621a712009-04-06 18:47:14 -040010779 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
James Smarte59058c2008-08-24 21:49:00 -040010780 * @vport: Pointer to virtual port.
10781 * @tgt_id: SCSI ID of the target.
10782 * @lun_id: LUN ID of the scsi device.
10783 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10784 *
10785 * This function returns the number of FCP commands pending for the vport.
10786 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
10787 * commands pending on the vport associated with SCSI device specified
10788 * by tgt_id and lun_id parameters.
10789 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
10790 * commands pending on the vport associated with SCSI target specified
10791 * by tgt_id parameter.
10792 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
10793 * commands pending on the vport.
10794 * This function returns the number of iocbs which satisfy the filter.
10795 * This function is called without any lock held.
10796 **/
dea31012005-04-17 16:05:31 -050010797int
James Smart51ef4c22007-08-02 11:10:31 -040010798lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10799 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050010800{
James Smart51ef4c22007-08-02 11:10:31 -040010801 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010802 struct lpfc_iocbq *iocbq;
10803 int sum, i;
dea31012005-04-17 16:05:31 -050010804
Johannes Thumshirn31979002016-07-18 16:06:03 +020010805 spin_lock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010806 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10807 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -050010808
James Smart51ef4c22007-08-02 11:10:31 -040010809 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
10810 ctx_cmd) == 0)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010811 sum++;
dea31012005-04-17 16:05:31 -050010812 }
Johannes Thumshirn31979002016-07-18 16:06:03 +020010813 spin_unlock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010814
dea31012005-04-17 16:05:31 -050010815 return sum;
10816}
10817
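/*
 * Usage sketch (illustrative only, not part of the driver): count the
 * FCP commands still pending against a single SCSI target. For
 * LPFC_CTX_TGT the lun_id argument is not consulted, so 0 is passed.
 *
 *	int pending;
 *
 *	pending = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
 */
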
James Smarte59058c2008-08-24 21:49:00 -040010818/**
James Smart3621a712009-04-06 18:47:14 -040010819 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
James Smarte59058c2008-08-24 21:49:00 -040010820 * @phba: Pointer to HBA context object
10821 * @cmdiocb: Pointer to command iocb object.
10822 * @rspiocb: Pointer to response iocb object.
10823 *
10824 * This function is called when an aborted FCP iocb completes. This
10825 * function is called by the ring event handler with no lock held.
10826 * This function frees the iocb.
10827 **/
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010828void
James Smart2e0fef82007-06-17 19:56:36 -050010829lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10830 struct lpfc_iocbq *rspiocb)
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010831{
James Smartcb69f7d2011-12-13 13:21:57 -050010832 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart8e668af2013-05-31 17:04:28 -040010833 "3096 ABORT_XRI_CN completing on rpi x%x "
James Smartcb69f7d2011-12-13 13:21:57 -050010834 "original iotag x%x, abort cmd iotag x%x "
10835 "status 0x%x, reason 0x%x\n",
10836 cmdiocb->iocb.un.acxri.abortContextTag,
10837 cmdiocb->iocb.un.acxri.abortIoTag,
10838 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
10839 rspiocb->iocb.un.ulpWord[4]);
James Bottomley604a3e32005-10-29 10:28:33 -050010840 lpfc_sli_release_iocbq(phba, cmdiocb);
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010841 return;
10842}
10843
James Smarte59058c2008-08-24 21:49:00 -040010844/**
James Smart3621a712009-04-06 18:47:14 -040010845 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
James Smarte59058c2008-08-24 21:49:00 -040010846 * @vport: Pointer to virtual port.
10847 * @pring: Pointer to driver SLI ring object.
10848 * @tgt_id: SCSI ID of the target.
10849 * @lun_id: LUN ID of the scsi device.
10850 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10851 *
10852 * This function sends an abort command for every SCSI command
10853 * associated with the given virtual port pending on the ring
10854 * filtered by lpfc_sli_validate_fcp_iocb function.
10855 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
10856 * FCP iocbs associated with lun specified by tgt_id and lun_id
10857 * parameters
10858 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
10859 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10860 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
10861 * FCP iocbs associated with virtual port.
10862 * This function returns the number of iocbs it failed to abort.
10863 * This function is called with no locks held.
10864 **/
dea31012005-04-17 16:05:31 -050010865int
James Smart51ef4c22007-08-02 11:10:31 -040010866lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10867 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea31012005-04-17 16:05:31 -050010868{
James Smart51ef4c22007-08-02 11:10:31 -040010869 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010870 struct lpfc_iocbq *iocbq;
10871 struct lpfc_iocbq *abtsiocb;
dea31012005-04-17 16:05:31 -050010872 IOCB_t *cmd = NULL;
dea31012005-04-17 16:05:31 -050010873 int errcnt = 0, ret_val = 0;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010874 int i;
dea31012005-04-17 16:05:31 -050010875
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010876 for (i = 1; i <= phba->sli.last_iotag; i++) {
10877 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -050010878
James Smart51ef4c22007-08-02 11:10:31 -040010879 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
James Smart2e0fef82007-06-17 19:56:36 -050010880 abort_cmd) != 0)
dea31012005-04-17 16:05:31 -050010881 continue;
10882
James Smartafbd8d82013-09-06 12:22:13 -040010883 /*
10884 * If the iocbq is already being aborted, don't take a second
10885 * action, but do count it.
10886 */
10887 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10888 continue;
10889
dea31012005-04-17 16:05:31 -050010890 /* issue ABTS for this IOCB based on iotag */
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010891 abtsiocb = lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -050010892 if (abtsiocb == NULL) {
10893 errcnt++;
10894 continue;
10895 }
dea31012005-04-17 16:05:31 -050010896
James Smartafbd8d82013-09-06 12:22:13 -040010897 /* indicate the IO is being aborted by the driver. */
10898 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10899
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010900 cmd = &iocbq->iocb;
dea31012005-04-17 16:05:31 -050010901 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10902 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
James Smartda0436e2009-05-22 14:51:39 -040010903 if (phba->sli_rev == LPFC_SLI_REV4)
10904 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10905 else
10906 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea31012005-04-17 16:05:31 -050010907 abtsiocb->iocb.ulpLe = 1;
10908 abtsiocb->iocb.ulpClass = cmd->ulpClass;
James Smartafbd8d82013-09-06 12:22:13 -040010909 abtsiocb->vport = vport;
dea31012005-04-17 16:05:31 -050010910
James Smart5ffc2662009-11-18 15:39:44 -050010911 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080010912 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
James Smart341af102010-01-26 23:07:37 -050010913 if (iocbq->iocb_flag & LPFC_IO_FCP)
10914 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040010915 if (iocbq->iocb_flag & LPFC_IO_FOF)
10916 abtsiocb->iocb_flag |= LPFC_IO_FOF;
James Smart5ffc2662009-11-18 15:39:44 -050010917
James Smart2e0fef82007-06-17 19:56:36 -050010918 if (lpfc_is_link_up(phba))
dea31012005-04-17 16:05:31 -050010919 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10920 else
10921 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10922
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010923 /* Setup callback routine and issue the command. */
10924 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
James Smartda0436e2009-05-22 14:51:39 -040010925 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
10926 abtsiocb, 0);
dea31012005-04-17 16:05:31 -050010927 if (ret_val == IOCB_ERROR) {
James Bottomley604a3e32005-10-29 10:28:33 -050010928 lpfc_sli_release_iocbq(phba, abtsiocb);
dea31012005-04-17 16:05:31 -050010929 errcnt++;
10930 continue;
10931 }
10932 }
10933
10934 return errcnt;
10935}
10936
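/*
 * Usage sketch (illustrative only, not part of the driver): after a
 * LUN reset task management command, abort every FCP iocb still
 * pending on that LUN. A non-zero return means some aborts could not
 * be issued. "pring" is assumed to be the ring the caller uses for
 * FCP traffic.
 *
 *	int errcnt;
 *
 *	errcnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, lun_id,
 *				     LPFC_CTX_LUN);
 */
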
James Smarte59058c2008-08-24 21:49:00 -040010937/**
James Smart98912dda2014-04-04 13:52:31 -040010938 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
10939 * @vport: Pointer to virtual port.
10940 * @pring: Pointer to driver SLI ring object.
10941 * @tgt_id: SCSI ID of the target.
10942 * @lun_id: LUN ID of the scsi device.
10943 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10944 *
10945 * This function sends an abort command for every SCSI command
10946 * associated with the given virtual port pending on the ring
10947 * filtered by lpfc_sli_validate_fcp_iocb function.
10948 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
10949 * FCP iocbs associated with lun specified by tgt_id and lun_id
10950 * parameters
10951 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
10952 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10953 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
10954 * FCP iocbs associated with virtual port.
10955 * This function returns the number of iocbs it aborted.
10956 * This function is called with no locks held right after a taskmgmt
10957 * command is sent.
10958 **/
10959int
10960lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10961 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10962{
10963 struct lpfc_hba *phba = vport->phba;
James Smart8c50d252014-09-03 12:58:16 -040010964 struct lpfc_scsi_buf *lpfc_cmd;
James Smart98912dda2014-04-04 13:52:31 -040010965 struct lpfc_iocbq *abtsiocbq;
James Smart8c50d252014-09-03 12:58:16 -040010966 struct lpfc_nodelist *ndlp;
James Smart98912dda2014-04-04 13:52:31 -040010967 struct lpfc_iocbq *iocbq;
10968 IOCB_t *icmd;
10969 int sum, i, ret_val;
10970 unsigned long iflags;
10971 struct lpfc_sli_ring *pring_s4;
James Smart98912dda2014-04-04 13:52:31 -040010972
10973 spin_lock_irq(&phba->hbalock);
10974
10975 /* all I/Os are in process of being flushed */
10976 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10977 spin_unlock_irq(&phba->hbalock);
10978 return 0;
10979 }
10980 sum = 0;
10981
10982 for (i = 1; i <= phba->sli.last_iotag; i++) {
10983 iocbq = phba->sli.iocbq_lookup[i];
10984
10985 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10986 cmd) != 0)
10987 continue;
10988
10989 /*
10990 * If the iocbq is already being aborted, don't take a second
10991 * action, but do count it.
10992 */
10993 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10994 continue;
10995
10996 /* issue ABTS for this IOCB based on iotag */
10997 abtsiocbq = __lpfc_sli_get_iocbq(phba);
10998 if (abtsiocbq == NULL)
10999 continue;
11000
11001 icmd = &iocbq->iocb;
11002 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11003 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11004 if (phba->sli_rev == LPFC_SLI_REV4)
11005 abtsiocbq->iocb.un.acxri.abortIoTag =
11006 iocbq->sli4_xritag;
11007 else
11008 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11009 abtsiocbq->iocb.ulpLe = 1;
11010 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11011 abtsiocbq->vport = vport;
11012
11013 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080011014 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
James Smart98912dda2014-04-04 13:52:31 -040011015 if (iocbq->iocb_flag & LPFC_IO_FCP)
11016 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040011017 if (iocbq->iocb_flag & LPFC_IO_FOF)
11018 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
James Smart98912dda2014-04-04 13:52:31 -040011019
James Smart8c50d252014-09-03 12:58:16 -040011020 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11021 ndlp = lpfc_cmd->rdata->pnode;
11022
11023 if (lpfc_is_link_up(phba) &&
11024 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
James Smart98912dda2014-04-04 13:52:31 -040011025 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11026 else
11027 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11028
11029 /* Setup callback routine and issue the command. */
11030 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11031
11032 /*
11033 * Indicate the IO is being aborted by the driver and set
11034 * the caller's flag into the aborted IO.
11035 */
11036 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11037
11038 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -080011039 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11040 if (pring_s4 == NULL)
11041 continue;
James Smart98912dda2014-04-04 13:52:31 -040011042 /* Note: both hbalock and ring_lock must be set here */
11043 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
11044 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11045 abtsiocbq, 0);
11046 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
11047 } else {
11048 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11049 abtsiocbq, 0);
11050 }
11051
11052
11053 if (ret_val == IOCB_ERROR)
11054 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11055 else
11056 sum++;
11057 }
11058 spin_unlock_irq(&phba->hbalock);
11059 return sum;
11060}
11061
11062/**
James Smart3621a712009-04-06 18:47:14 -040011063 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
James Smarte59058c2008-08-24 21:49:00 -040011064 * @phba: Pointer to HBA context object.
11065 * @cmdiocbq: Pointer to command iocb.
11066 * @rspiocbq: Pointer to response iocb.
11067 *
11068 * This function is the completion handler for iocbs issued using
11069 * lpfc_sli_issue_iocb_wait function. This function is called by the
11070 * ring event handler function without any lock held. This function
11071 * can be called from both worker thread context and interrupt
11072 * context. This function can also be called from another thread which
11073 * cleans up the SLI layer objects.
11074 * This function copies the contents of the response iocb to the
11075 * response iocb memory object provided by the caller of
11076 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11077 * sleeps for the iocb completion.
11078 **/
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011079static void
11080lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11081 struct lpfc_iocbq *cmdiocbq,
11082 struct lpfc_iocbq *rspiocbq)
dea31012005-04-17 16:05:31 -050011083{
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011084 wait_queue_head_t *pdone_q;
11085 unsigned long iflags;
James Smart0f65ff62010-02-26 14:14:23 -050011086 struct lpfc_scsi_buf *lpfc_cmd;
dea31012005-04-17 16:05:31 -050011087
James Smart2e0fef82007-06-17 19:56:36 -050011088 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart5a0916b2013-07-15 18:31:42 -040011089 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11090
11091 /*
11092 * A time out has occurred for the iocb. If a time out
11093 * completion handler has been supplied, call it. Otherwise,
11094 * just free the iocbq.
11095 */
11096
11097 spin_unlock_irqrestore(&phba->hbalock, iflags);
11098 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11099 cmdiocbq->wait_iocb_cmpl = NULL;
11100 if (cmdiocbq->iocb_cmpl)
11101 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11102 else
11103 lpfc_sli_release_iocbq(phba, cmdiocbq);
11104 return;
11105 }
11106
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011107 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11108 if (cmdiocbq->context2 && rspiocbq)
11109 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11110 &rspiocbq->iocb, sizeof(IOCB_t));
11111
James Smart0f65ff62010-02-26 14:14:23 -050011112 /* Set the exchange busy flag for task management commands */
11113 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11114 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11115 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11116 cur_iocbq);
11117 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11118 }
11119
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011120 pdone_q = cmdiocbq->context_un.wait_queue;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011121 if (pdone_q)
11122 wake_up(pdone_q);
James Smart858c9f62007-06-17 19:56:39 -050011123 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea31012005-04-17 16:05:31 -050011124 return;
11125}
11126
James Smarte59058c2008-08-24 21:49:00 -040011127/**
James Smartd11e31d2009-06-10 17:23:06 -040011128 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11129 * @phba: Pointer to HBA context object..
11130 * @piocbq: Pointer to command iocb.
11131 * @flag: Flag to test.
11132 *
11133 * This routine grabs the hbalock and then tests the iocb_flag to
11134 * see if the passed in flag is set.
11135 * Returns:
11136 * 1 if flag is set.
11137 * 0 if flag is not set.
11138 **/
11139static int
11140lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11141 struct lpfc_iocbq *piocbq, uint32_t flag)
11142{
11143 unsigned long iflags;
11144 int ret;
11145
11146 spin_lock_irqsave(&phba->hbalock, iflags);
11147 ret = piocbq->iocb_flag & flag;
11148 spin_unlock_irqrestore(&phba->hbalock, iflags);
11149 return ret;
11150
11151}
11152
11153/**
James Smart3621a712009-04-06 18:47:14 -040011154 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
James Smarte59058c2008-08-24 21:49:00 -040011155 * @phba: Pointer to HBA context object..
11156 * @pring: Pointer to sli ring.
11157 * @piocb: Pointer to command iocb.
11158 * @prspiocbq: Pointer to response iocb.
11159 * @timeout: Timeout in number of seconds.
11160 *
11161 * This function issues the iocb to firmware and waits for the
James Smart5a0916b2013-07-15 18:31:42 -040011162 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11163 * to handle iocbs which time out. If the field is NULL, the
11164 * function shall free the iocbq structure. If more clean up is
11165 * needed, the caller is expected to provide a completion function
11166 * that will provide the needed clean up. If the iocb command is
11167 * not completed within timeout seconds, the function will either
11168 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11169 * completion function set in the iocb_cmpl field and then return
11170 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11171 * resources if this function returns IOCB_TIMEDOUT.
James Smarte59058c2008-08-24 21:49:00 -040011172 * The function waits for the iocb completion using a
11173 * non-interruptible wait.
11174 * This function will sleep while waiting for iocb completion.
11175 * So, this function should not be called from any context which
11176 * does not allow sleeping. Due to the same reason, this function
11177 * cannot be called with interrupt disabled.
11178 * This function assumes that the iocb completions occur while
11179 * this function sleeps. So, this function cannot be called from
11180 * the thread which processes iocb completion for this ring.
11181 * This function clears the iocb_flag of the iocb object before
11182 * issuing the iocb and the iocb completion handler sets this
11183 * flag and wakes this thread when the iocb completes.
11184 * The contents of the response iocb will be copied to prspiocbq
11185 * by the completion handler when the command completes.
11186 * This function returns IOCB_SUCCESS when success.
11187 * This function is called with no lock held.
11188 **/
dea31012005-04-17 16:05:31 -050011189int
James Smart2e0fef82007-06-17 19:56:36 -050011190lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
James Smartda0436e2009-05-22 14:51:39 -040011191 uint32_t ring_number,
James Smart2e0fef82007-06-17 19:56:36 -050011192 struct lpfc_iocbq *piocb,
11193 struct lpfc_iocbq *prspiocbq,
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011194 uint32_t timeout)
dea31012005-04-17 16:05:31 -050011195{
Peter Zijlstra7259f0d2006-10-29 22:46:36 -080011196 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011197 long timeleft, timeout_req = 0;
11198 int retval = IOCB_SUCCESS;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011199 uint32_t creg_val;
James Smart0e9bb8d2013-03-01 16:35:12 -050011200 struct lpfc_iocbq *iocb;
11201 int txq_cnt = 0;
11202 int txcmplq_cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080011203 struct lpfc_sli_ring *pring;
James Smart5a0916b2013-07-15 18:31:42 -040011204 unsigned long iflags;
11205 bool iocb_completed = true;
11206
James Smart895427b2017-02-12 13:52:30 -080011207 if (phba->sli_rev >= LPFC_SLI_REV4)
11208 pring = lpfc_sli4_calc_ring(phba, piocb);
11209 else
11210 pring = &phba->sli.sli3_ring[ring_number];
dea31012005-04-17 16:05:31 -050011211 /*
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011212 * If the caller has provided a response iocbq buffer, then context2
11213 * is expected to be NULL; otherwise it is an error.
dea31012005-04-17 16:05:31 -050011214 */
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011215 if (prspiocbq) {
11216 if (piocb->context2)
11217 return IOCB_ERROR;
11218 piocb->context2 = prspiocbq;
dea31012005-04-17 16:05:31 -050011219 }
11220
James Smart5a0916b2013-07-15 18:31:42 -040011221 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011222 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11223 piocb->context_un.wait_queue = &done_q;
James Smart5a0916b2013-07-15 18:31:42 -040011224 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
dea31012005-04-17 16:05:31 -050011225
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011226 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -050011227 if (lpfc_readl(phba->HCregaddr, &creg_val))
11228 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011229 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11230 writel(creg_val, phba->HCregaddr);
11231 readl(phba->HCregaddr); /* flush */
11232 }
11233
James Smart2a9bf3d2010-06-07 15:24:45 -040011234 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11235 SLI_IOCB_RET_IOCB);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011236 if (retval == IOCB_SUCCESS) {
James Smart256ec0d2013-04-17 20:14:58 -040011237 timeout_req = msecs_to_jiffies(timeout * 1000);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011238 timeleft = wait_event_timeout(done_q,
James Smartd11e31d2009-06-10 17:23:06 -040011239 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011240 timeout_req);
James Smart5a0916b2013-07-15 18:31:42 -040011241 spin_lock_irqsave(&phba->hbalock, iflags);
11242 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
dea31012005-04-17 16:05:31 -050011243
James Smart5a0916b2013-07-15 18:31:42 -040011244 /*
11245 * IOCB timed out. Inform the wake iocb wait
11246 * completion function and set local status
11247 */
11248
11249 iocb_completed = false;
11250 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11251 }
11252 spin_unlock_irqrestore(&phba->hbalock, iflags);
11253 if (iocb_completed) {
James Smart7054a602007-04-25 09:52:34 -040011254 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011255 "0331 IOCB wake signaled\n");
James Smart53151bb2013-10-10 12:24:07 -040011256 /* Note: we are not indicating if the IOCB has a success
11257 * status or not - that's for the caller to check.
11258 * IOCB_SUCCESS means just that the command was sent and
11259 * completed. Not that it completed successfully.
11260 * */
James Smart7054a602007-04-25 09:52:34 -040011261 } else if (timeleft == 0) {
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011262 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011263 "0338 IOCB wait timeout error - no "
11264 "wake response Data x%x\n", timeout);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011265 retval = IOCB_TIMEDOUT;
James Smart7054a602007-04-25 09:52:34 -040011266 } else {
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011267 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011268 "0330 IOCB wake NOT set, "
11269 "Data x%x x%lx\n",
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011270 timeout, (timeleft / jiffies));
11271 retval = IOCB_TIMEDOUT;
dea31012005-04-17 16:05:31 -050011272 }
James Smart2a9bf3d2010-06-07 15:24:45 -040011273 } else if (retval == IOCB_BUSY) {
James Smart0e9bb8d2013-03-01 16:35:12 -050011274 if (phba->cfg_log_verbose & LOG_SLI) {
11275 list_for_each_entry(iocb, &pring->txq, list) {
11276 txq_cnt++;
11277 }
11278 list_for_each_entry(iocb, &pring->txcmplq, list) {
11279 txcmplq_cnt++;
11280 }
11281 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11282 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11283 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11284 }
James Smart2a9bf3d2010-06-07 15:24:45 -040011285 return retval;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011286 } else {
11287 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smartd7c255b2008-08-24 21:50:00 -040011288 "0332 IOCB wait issue failed, Data x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011289 retval);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011290 retval = IOCB_ERROR;
dea31012005-04-17 16:05:31 -050011291 }
11292
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011293 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -050011294 if (lpfc_readl(phba->HCregaddr, &creg_val))
11295 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011296 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11297 writel(creg_val, phba->HCregaddr);
11298 readl(phba->HCregaddr); /* flush */
11299 }
11300
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011301 if (prspiocbq)
11302 piocb->context2 = NULL;
11303
11304 piocb->context_un.wait_queue = NULL;
11305 piocb->iocb_cmpl = NULL;
dea31012005-04-17 16:05:31 -050011306 return retval;
11307}
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011308
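/*
 * Usage sketch (illustrative only, not part of the driver): issue an
 * ELS-ring iocb synchronously and wait up to 30 seconds. context2 of
 * the command iocb must be NULL when a response iocb is supplied, and
 * on IOCB_TIMEDOUT the caller must not free the command iocb. Command
 * construction and error handling are elided.
 *
 *	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
 *	int rc;
 *
 *	cmdiocbq = lpfc_sli_get_iocbq(phba);
 *	rspiocbq = lpfc_sli_get_iocbq(phba);
 *	... fill in cmdiocbq->iocb ...
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_SUCCESS)
 *		... rspiocbq->iocb now holds the completed response ...
 */
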
James Smarte59058c2008-08-24 21:49:00 -040011309/**
James Smart3621a712009-04-06 18:47:14 -040011310 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
James Smarte59058c2008-08-24 21:49:00 -040011311 * @phba: Pointer to HBA context object.
11312 * @pmboxq: Pointer to driver mailbox object.
11313 * @timeout: Timeout in number of seconds.
11314 *
11315 * This function issues the mailbox to firmware and waits for the
11316 * mailbox command to complete. If the mailbox command is not
11317 * completed within timeout seconds, it returns MBX_TIMEOUT.
11318 * The function waits for the mailbox completion using an
11319 * interruptible wait. If the thread is woken up due to a
11320 * signal, MBX_TIMEOUT error is returned to the caller. Caller
11321 * should not free the mailbox resources, if this function returns
11322 * MBX_TIMEOUT.
11323 * This function will sleep while waiting for mailbox completion.
11324 * So, this function should not be called from any context which
11325 * does not allow sleeping. Due to the same reason, this function
11326 * cannot be called with interrupt disabled.
11327 * This function assumes that the mailbox completion occurs while
11328 * this function sleeps. So, this function cannot be called from
11329 * the worker thread which processes mailbox completion.
11330 * This function is called in the context of HBA management
11331 * applications.
11332 * This function returns MBX_SUCCESS when successful.
11333 * This function is called with no lock held.
11334 **/
dea31012005-04-17 16:05:31 -050011335int
James Smart2e0fef82007-06-17 19:56:36 -050011336lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea31012005-04-17 16:05:31 -050011337 uint32_t timeout)
11338{
Peter Zijlstra7259f0d2006-10-29 22:46:36 -080011339 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
James Smartb230b8a22013-05-31 17:05:27 -040011340 MAILBOX_t *mb = NULL;
dea31012005-04-17 16:05:31 -050011341 int retval;
James Smart858c9f62007-06-17 19:56:39 -050011342 unsigned long flag;
dea31012005-04-17 16:05:31 -050011343
James Smartb230b8a22013-05-31 17:05:27 -040011344 /* The caller might set context1 for extended buffer */
James Smart98c9ea52007-10-27 13:37:33 -040011345 if (pmboxq->context1)
James Smartb230b8a22013-05-31 17:05:27 -040011346 mb = (MAILBOX_t *)pmboxq->context1;
dea31012005-04-17 16:05:31 -050011347
James Smart495a7142008-06-14 22:52:59 -040011348 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea31012005-04-17 16:05:31 -050011349 /* setup wake call as IOCB callback */
11350 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11351 /* setup context field to pass wait_queue pointer to wake function */
11352 pmboxq->context1 = &done_q;
11353
dea31012005-04-17 16:05:31 -050011354 /* now issue the command */
11355 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea31012005-04-17 16:05:31 -050011356 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
James Smart7054a602007-04-25 09:52:34 -040011357 wait_event_interruptible_timeout(done_q,
11358 pmboxq->mbox_flag & LPFC_MBX_WAKE,
James Smart256ec0d2013-04-17 20:14:58 -040011359 msecs_to_jiffies(timeout * 1000));
James Smart7054a602007-04-25 09:52:34 -040011360
James Smart858c9f62007-06-17 19:56:39 -050011361 spin_lock_irqsave(&phba->hbalock, flag);
James Smartb230b8a22013-05-31 17:05:27 -040011362 /* restore the possible extended buffer for free resource */
11363 pmboxq->context1 = (uint8_t *)mb;
James Smart7054a602007-04-25 09:52:34 -040011364 /*
11365	 * if the LPFC_MBX_WAKE flag is set the mailbox has completed;
11366	 * otherwise do not free the resources.
11367 */
James Smartd7c47992010-06-08 18:31:54 -040011368 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea31012005-04-17 16:05:31 -050011369 retval = MBX_SUCCESS;
James Smartd7c47992010-06-08 18:31:54 -040011370 } else {
James Smart7054a602007-04-25 09:52:34 -040011371 retval = MBX_TIMEOUT;
James Smart858c9f62007-06-17 19:56:39 -050011372 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11373 }
11374 spin_unlock_irqrestore(&phba->hbalock, flag);
James Smartb230b8a22013-05-31 17:05:27 -040011375 } else {
11376 /* restore the possible extended buffer for free resource */
11377 pmboxq->context1 = (uint8_t *)mb;
dea31012005-04-17 16:05:31 -050011378 }
11379
dea31012005-04-17 16:05:31 -050011380 return retval;
11381}
11382
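/*
 * Illustrative usage sketch (guarded out so it does not affect the build):
 * a typical caller of lpfc_sli_issue_mbox_wait().  example_sync_mbox() and
 * example_setup_mbox() are hypothetical names; the point being shown is
 * that the mailbox memory must NOT be freed when MBX_TIMEOUT is returned,
 * since the completion may still arrive and is cleaned up by the driver.
 */
#if 0
static int example_sync_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	memset(pmboxq, 0, sizeof(*pmboxq));

	/* example_setup_mbox() stands in for a real mailbox setup routine */
	example_setup_mbox(phba, pmboxq);

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		/* Do not free: lpfc_sli_def_mbox_cmpl releases it later */
		return -ETIMEDOUT;

	mempool_free(pmboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif
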
James Smarte59058c2008-08-24 21:49:00 -040011383/**
James Smart3772a992009-05-22 14:50:54 -040011384 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
James Smarte59058c2008-08-24 21:49:00 -040011385 * @phba: Pointer to HBA context.
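 * @mbx_action: LPFC_MBX_NO_WAIT to flush right away without waiting for the
 *	active mailbox command; any other value waits for it to complete first.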
11386 *
James Smart3772a992009-05-22 14:50:54 -040011387 * This function is called to shutdown the driver's mailbox sub-system.
11388 * It first marks the mailbox sub-system as blocked to prevent the
11389 * asynchronous mailbox command from being issued off the pending mailbox
11390 * command queue. If the mailbox command sub-system shutdown is due to
11391 * HBA error conditions such as EEH or ERATT, this routine shall invoke
11392 * the mailbox sub-system flush routine to forcefully bring down the
11393 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
11394 * as with offline or HBA function reset), this routine will wait for the
11395 * outstanding mailbox command to complete before invoking the mailbox
11396 * sub-system flush routine to gracefully bring down mailbox sub-system.
James Smarte59058c2008-08-24 21:49:00 -040011397 **/
James Smart3772a992009-05-22 14:50:54 -040011398void
James Smart618a5232012-06-12 13:54:36 -040011399lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
James Smartb4c02652006-07-06 15:50:43 -040011400{
James Smart3772a992009-05-22 14:50:54 -040011401 struct lpfc_sli *psli = &phba->sli;
James Smart3772a992009-05-22 14:50:54 -040011402 unsigned long timeout;
11403
James Smart618a5232012-06-12 13:54:36 -040011404 if (mbx_action == LPFC_MBX_NO_WAIT) {
11405 /* delay 100ms for port state */
11406 msleep(100);
11407 lpfc_sli_mbox_sys_flush(phba);
11408 return;
11409 }
James Smarta183a152011-10-10 21:32:43 -040011410 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
James Smartd7069f02012-03-01 22:36:29 -050011411
James Smart3772a992009-05-22 14:50:54 -040011412 spin_lock_irq(&phba->hbalock);
11413 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
James Smart3772a992009-05-22 14:50:54 -040011414
11415 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
James Smart3772a992009-05-22 14:50:54 -040011416 /* Determine how long we might wait for the active mailbox
11417 * command to be gracefully completed by firmware.
11418 */
James Smarta183a152011-10-10 21:32:43 -040011419 if (phba->sli.mbox_active)
11420 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
11421 phba->sli.mbox_active) *
11422 1000) + jiffies;
11423 spin_unlock_irq(&phba->hbalock);
11424
James Smart3772a992009-05-22 14:50:54 -040011425 while (phba->sli.mbox_active) {
11426 /* Check active mailbox complete status every 2ms */
11427 msleep(2);
11428 if (time_after(jiffies, timeout))
11429 			/* Timeout, let the mailbox flush routine
11430 			 * forcefully release the active mailbox command
11431 */
11432 break;
11433 }
James Smartd7069f02012-03-01 22:36:29 -050011434 } else
11435 spin_unlock_irq(&phba->hbalock);
11436
James Smart3772a992009-05-22 14:50:54 -040011437 lpfc_sli_mbox_sys_flush(phba);
11438}
11439
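/*
 * Illustrative sketch (guarded out of the build): how a teardown path might
 * pick the shutdown action.  LPFC_MBX_WAIT is assumed to be the "wait"
 * counterpart of LPFC_MBX_NO_WAIT used above; the helper name is hypothetical.
 */
#if 0
static void example_mbox_teardown(struct lpfc_hba *phba, bool port_dead)
{
	if (port_dead)
		/* Port unresponsive (e.g. EEH/ERATT): flush immediately */
		lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
	else
		/* Normal offline: let the active mailbox finish first */
		lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
}
#endif
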
11440/**
11441 * lpfc_sli_eratt_read - read sli-3 error attention events
11442 * @phba: Pointer to HBA context.
11443 *
11444 * This function is called to read the SLI3 device error attention registers
11445 * for possible error attention events. The caller must hold the hostlock
11446 * with spin_lock_irq().
11447 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011448 * This function returns 1 when there is Error Attention in the Host Attention
James Smart3772a992009-05-22 14:50:54 -040011449 * Register and returns 0 otherwise.
11450 **/
11451static int
11452lpfc_sli_eratt_read(struct lpfc_hba *phba)
11453{
James Smarted957682007-06-17 19:56:37 -050011454 uint32_t ha_copy;
James Smartb4c02652006-07-06 15:50:43 -040011455
James Smart3772a992009-05-22 14:50:54 -040011456 /* Read chip Host Attention (HA) register */
James Smart9940b972011-03-11 16:06:12 -050011457 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11458 goto unplug_err;
11459
James Smart3772a992009-05-22 14:50:54 -040011460 if (ha_copy & HA_ERATT) {
11461 /* Read host status register to retrieve error event */
James Smart9940b972011-03-11 16:06:12 -050011462 if (lpfc_sli_read_hs(phba))
11463 goto unplug_err;
James Smartb4c02652006-07-06 15:50:43 -040011464
James Smart3772a992009-05-22 14:50:54 -040011465 		/* Check if a deferred error condition is active */
11466 if ((HS_FFER1 & phba->work_hs) &&
11467 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -040011468 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
James Smart3772a992009-05-22 14:50:54 -040011469 phba->hba_flag |= DEFER_ERATT;
James Smart3772a992009-05-22 14:50:54 -040011470 /* Clear all interrupt enable conditions */
11471 writel(0, phba->HCregaddr);
11472 readl(phba->HCregaddr);
11473 }
11474
11475 /* Set the driver HA work bitmap */
James Smart3772a992009-05-22 14:50:54 -040011476 phba->work_ha |= HA_ERATT;
11477 /* Indicate polling handles this ERATT */
11478 phba->hba_flag |= HBA_ERATT_HANDLED;
James Smart3772a992009-05-22 14:50:54 -040011479 return 1;
James Smartb4c02652006-07-06 15:50:43 -040011480 }
James Smart3772a992009-05-22 14:50:54 -040011481 return 0;
James Smart9940b972011-03-11 16:06:12 -050011482
11483unplug_err:
11484 /* Set the driver HS work bitmap */
11485 phba->work_hs |= UNPLUG_ERR;
11486 /* Set the driver HA work bitmap */
11487 phba->work_ha |= HA_ERATT;
11488 /* Indicate polling handles this ERATT */
11489 phba->hba_flag |= HBA_ERATT_HANDLED;
11490 return 1;
James Smartb4c02652006-07-06 15:50:43 -040011491}
11492
James Smarte59058c2008-08-24 21:49:00 -040011493/**
James Smartda0436e2009-05-22 14:51:39 -040011494 * lpfc_sli4_eratt_read - read sli-4 error attention events
11495 * @phba: Pointer to HBA context.
11496 *
11497 * This function is called to read the SLI4 device error attention registers
11498 * for possible error attention events. The caller must hold the hostlock
11499 * with spin_lock_irq().
11500 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011501 * This function returns 1 when there is Error Attention in the Host Attention
James Smartda0436e2009-05-22 14:51:39 -040011502 * Register and returns 0 otherwise.
11503 **/
11504static int
11505lpfc_sli4_eratt_read(struct lpfc_hba *phba)
11506{
11507 uint32_t uerr_sta_hi, uerr_sta_lo;
James Smart2fcee4b2010-12-15 17:57:46 -050011508 uint32_t if_type, portsmphr;
11509 struct lpfc_register portstat_reg;
James Smartda0436e2009-05-22 14:51:39 -040011510
James Smart2fcee4b2010-12-15 17:57:46 -050011511 /*
11512 * For now, use the SLI4 device internal unrecoverable error
James Smartda0436e2009-05-22 14:51:39 -040011513 * registers for error attention. This can be changed later.
11514 */
James Smart2fcee4b2010-12-15 17:57:46 -050011515 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11516 switch (if_type) {
11517 case LPFC_SLI_INTF_IF_TYPE_0:
James Smart9940b972011-03-11 16:06:12 -050011518 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
11519 &uerr_sta_lo) ||
11520 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
11521 &uerr_sta_hi)) {
11522 phba->work_hs |= UNPLUG_ERR;
11523 phba->work_ha |= HA_ERATT;
11524 phba->hba_flag |= HBA_ERATT_HANDLED;
11525 return 1;
11526 }
James Smart2fcee4b2010-12-15 17:57:46 -050011527 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
11528 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11530 "1423 HBA Unrecoverable error: "
11531 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
11532 "ue_mask_lo_reg=0x%x, "
11533 "ue_mask_hi_reg=0x%x\n",
11534 uerr_sta_lo, uerr_sta_hi,
11535 phba->sli4_hba.ue_mask_lo,
11536 phba->sli4_hba.ue_mask_hi);
11537 phba->work_status[0] = uerr_sta_lo;
11538 phba->work_status[1] = uerr_sta_hi;
11539 phba->work_ha |= HA_ERATT;
11540 phba->hba_flag |= HBA_ERATT_HANDLED;
11541 return 1;
11542 }
11543 break;
11544 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart9940b972011-03-11 16:06:12 -050011545 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
11546 &portstat_reg.word0) ||
11547 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
11548 &portsmphr)){
11549 phba->work_hs |= UNPLUG_ERR;
11550 phba->work_ha |= HA_ERATT;
11551 phba->hba_flag |= HBA_ERATT_HANDLED;
11552 return 1;
11553 }
James Smart2fcee4b2010-12-15 17:57:46 -050011554 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
11555 phba->work_status[0] =
11556 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
11557 phba->work_status[1] =
11558 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart2e90f4b2011-12-13 13:22:37 -050011560 "2885 Port Status Event: "
James Smart2fcee4b2010-12-15 17:57:46 -050011561 "port status reg 0x%x, "
11562 "port smphr reg 0x%x, "
11563 "error 1=0x%x, error 2=0x%x\n",
11564 portstat_reg.word0,
11565 portsmphr,
11566 phba->work_status[0],
11567 phba->work_status[1]);
11568 phba->work_ha |= HA_ERATT;
11569 phba->hba_flag |= HBA_ERATT_HANDLED;
11570 return 1;
11571 }
11572 break;
11573 case LPFC_SLI_INTF_IF_TYPE_1:
11574 default:
James Smarta747c9c2009-11-18 15:41:10 -050011575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart2fcee4b2010-12-15 17:57:46 -050011576 "2886 HBA Error Attention on unsupported "
11577 "if type %d.", if_type);
James Smarta747c9c2009-11-18 15:41:10 -050011578 return 1;
James Smartda0436e2009-05-22 14:51:39 -040011579 }
James Smart2fcee4b2010-12-15 17:57:46 -050011580
James Smartda0436e2009-05-22 14:51:39 -040011581 return 0;
11582}
11583
11584/**
James Smart3621a712009-04-06 18:47:14 -040011585 * lpfc_sli_check_eratt - check error attention events
James Smart93996272008-08-24 21:50:30 -040011586 * @phba: Pointer to HBA context.
11587 *
James Smart3772a992009-05-22 14:50:54 -040011588 * This function is called from timer soft interrupt context to check HBA's
James Smart93996272008-08-24 21:50:30 -040011589 * error attention register bit for error attention events.
11590 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011591 * This function returns 1 when there is Error Attention in the Host Attention
James Smart93996272008-08-24 21:50:30 -040011592 * Register and returns 0 otherwise.
11593 **/
11594int
11595lpfc_sli_check_eratt(struct lpfc_hba *phba)
11596{
11597 uint32_t ha_copy;
11598
11599 /* If somebody is waiting to handle an eratt, don't process it
11600 * here. The brdkill function will do this.
11601 */
11602 if (phba->link_flag & LS_IGNORE_ERATT)
11603 return 0;
11604
11605 /* Check if interrupt handler handles this ERATT */
11606 spin_lock_irq(&phba->hbalock);
11607 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11608 /* Interrupt handler has handled ERATT */
11609 spin_unlock_irq(&phba->hbalock);
11610 return 0;
11611 }
11612
James Smarta257bf92009-04-06 18:48:10 -040011613 /*
11614 * If there is deferred error attention, do not check for error
11615 * attention
11616 */
11617 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11618 spin_unlock_irq(&phba->hbalock);
11619 return 0;
11620 }
11621
James Smart3772a992009-05-22 14:50:54 -040011622 /* If PCI channel is offline, don't process it */
11623 if (unlikely(pci_channel_offline(phba->pcidev))) {
James Smart93996272008-08-24 21:50:30 -040011624 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -040011625 return 0;
11626 }
11627
11628 switch (phba->sli_rev) {
11629 case LPFC_SLI_REV2:
11630 case LPFC_SLI_REV3:
11631 /* Read chip Host Attention (HA) register */
11632 ha_copy = lpfc_sli_eratt_read(phba);
11633 break;
James Smartda0436e2009-05-22 14:51:39 -040011634 case LPFC_SLI_REV4:
James Smart2fcee4b2010-12-15 17:57:46 -050011635 		/* Read device Unrecoverable Error (UERR) registers */
James Smartda0436e2009-05-22 14:51:39 -040011636 ha_copy = lpfc_sli4_eratt_read(phba);
11637 break;
James Smart3772a992009-05-22 14:50:54 -040011638 default:
11639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11640 "0299 Invalid SLI revision (%d)\n",
11641 phba->sli_rev);
11642 ha_copy = 0;
11643 break;
James Smart93996272008-08-24 21:50:30 -040011644 }
11645 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -040011646
11647 return ha_copy;
11648}
11649
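/*
 * Illustrative sketch (guarded out of the build): lpfc_sli_check_eratt() is
 * intended to be polled from timer/soft-irq context; on a hit the caller only
 * needs to wake the worker thread, which performs the actual recovery.  The
 * function name is hypothetical.
 */
#if 0
static void example_eratt_poll(struct lpfc_hba *phba)
{
	/* Non-zero means an ERATT (or SLI4 port error) is pending */
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}
#endif
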
11650/**
11651 * lpfc_intr_state_check - Check device state for interrupt handling
11652 * @phba: Pointer to HBA context.
11653 *
11654 * This inline routine checks whether a device or its PCI slot is in a state
11655 * in which the interrupt should be handled.
11656 *
11657 * This function returns 0 if the device or the PCI slot is in a state that
11658 * interrupt should be handled, otherwise -EIO.
11659 */
11660static inline int
11661lpfc_intr_state_check(struct lpfc_hba *phba)
11662{
11663 /* If the pci channel is offline, ignore all the interrupts */
11664 if (unlikely(pci_channel_offline(phba->pcidev)))
11665 return -EIO;
11666
11667 /* Update device level interrupt statistics */
11668 phba->sli.slistat.sli_intr++;
11669
11670 /* Ignore all interrupts during initialization. */
11671 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11672 return -EIO;
11673
James Smart93996272008-08-24 21:50:30 -040011674 return 0;
11675}
11676
11677/**
James Smart3772a992009-05-22 14:50:54 -040011678 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
James Smarte59058c2008-08-24 21:49:00 -040011679 * @irq: Interrupt number.
11680 * @dev_id: The device context pointer.
11681 *
James Smart93996272008-08-24 21:50:30 -040011682 * This function is directly called from the PCI layer as an interrupt
James Smart3772a992009-05-22 14:50:54 -040011683 * service routine when device with SLI-3 interface spec is enabled with
11684 * MSI-X multi-message interrupt mode and there are slow-path events in
11685 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11686 * interrupt mode, this function is called as part of the device-level
11687 * interrupt handler. When the PCI slot is in error recovery or the HBA
11688 * is undergoing initialization, the interrupt handler will not process
11689 * the interrupt. The link attention and ELS ring attention events are
11690 * handled by the worker thread. The interrupt handler signals the worker
11691 * thread and returns for these events. This function is called without
11692 * any lock held. It gets the hbalock to access and update SLI data
James Smart93996272008-08-24 21:50:30 -040011693 * structures.
11694 *
11695 * This function returns IRQ_HANDLED when interrupt is handled else it
11696 * returns IRQ_NONE.
James Smarte59058c2008-08-24 21:49:00 -040011697 **/
dea31012005-04-17 16:05:31 -050011698irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040011699lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea31012005-04-17 16:05:31 -050011700{
James Smart2e0fef82007-06-17 19:56:36 -050011701 struct lpfc_hba *phba;
James Smarta747c9c2009-11-18 15:41:10 -050011702 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -050011703 uint32_t work_ha_copy;
11704 unsigned long status;
James Smart5b75da22008-12-04 22:39:35 -050011705 unsigned long iflag;
dea31012005-04-17 16:05:31 -050011706 uint32_t control;
11707
James Smart92d7f7b2007-06-17 19:56:38 -050011708 MAILBOX_t *mbox, *pmbox;
James Smart858c9f62007-06-17 19:56:39 -050011709 struct lpfc_vport *vport;
11710 struct lpfc_nodelist *ndlp;
11711 struct lpfc_dmabuf *mp;
James Smart92d7f7b2007-06-17 19:56:38 -050011712 LPFC_MBOXQ_t *pmb;
11713 int rc;
11714
dea31012005-04-17 16:05:31 -050011715 /*
11716 * Get the driver's phba structure from the dev_id and
11717 * assume the HBA is not interrupting.
11718 */
James Smart93996272008-08-24 21:50:30 -040011719 phba = (struct lpfc_hba *)dev_id;
dea31012005-04-17 16:05:31 -050011720
11721 if (unlikely(!phba))
11722 return IRQ_NONE;
11723
dea31012005-04-17 16:05:31 -050011724 /*
James Smart93996272008-08-24 21:50:30 -040011725 	 * Stuff needs to be attended to when this function is invoked as an
11726 * individual interrupt handler in MSI-X multi-message interrupt mode
dea31012005-04-17 16:05:31 -050011727 */
James Smart93996272008-08-24 21:50:30 -040011728 if (phba->intr_type == MSIX) {
James Smart3772a992009-05-22 14:50:54 -040011729 /* Check device state for handling interrupt */
11730 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040011731 return IRQ_NONE;
11732 /* Need to read HA REG for slow-path events */
James Smart5b75da22008-12-04 22:39:35 -050011733 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -050011734 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11735 goto unplug_error;
James Smart93996272008-08-24 21:50:30 -040011736 /* If somebody is waiting to handle an eratt don't process it
11737 * here. The brdkill function will do this.
11738 */
11739 if (phba->link_flag & LS_IGNORE_ERATT)
11740 ha_copy &= ~HA_ERATT;
11741 /* Check the need for handling ERATT in interrupt handler */
11742 if (ha_copy & HA_ERATT) {
11743 if (phba->hba_flag & HBA_ERATT_HANDLED)
11744 /* ERATT polling has handled ERATT */
11745 ha_copy &= ~HA_ERATT;
11746 else
11747 /* Indicate interrupt handler handles ERATT */
11748 phba->hba_flag |= HBA_ERATT_HANDLED;
11749 }
James Smarta257bf92009-04-06 18:48:10 -040011750
11751 /*
11752 * If there is deferred error attention, do not check for any
11753 * interrupt.
11754 */
11755 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
James Smart3772a992009-05-22 14:50:54 -040011756 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040011757 return IRQ_NONE;
11758 }
11759
James Smart93996272008-08-24 21:50:30 -040011760 /* Clear up only attention source related to slow-path */
James Smart9940b972011-03-11 16:06:12 -050011761 if (lpfc_readl(phba->HCregaddr, &hc_copy))
11762 goto unplug_error;
11763
James Smarta747c9c2009-11-18 15:41:10 -050011764 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11765 HC_LAINT_ENA | HC_ERINT_ENA),
11766 phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040011767 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11768 phba->HAregaddr);
James Smarta747c9c2009-11-18 15:41:10 -050011769 writel(hc_copy, phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040011770 readl(phba->HAregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050011771 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040011772 } else
11773 ha_copy = phba->ha_copy;
dea31012005-04-17 16:05:31 -050011774
dea31012005-04-17 16:05:31 -050011775 work_ha_copy = ha_copy & phba->work_ha_mask;
11776
James Smart93996272008-08-24 21:50:30 -040011777 if (work_ha_copy) {
dea31012005-04-17 16:05:31 -050011778 if (work_ha_copy & HA_LATT) {
11779 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11780 /*
11781 * Turn off Link Attention interrupts
11782 * until CLEAR_LA done
11783 */
James Smart5b75da22008-12-04 22:39:35 -050011784 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011785 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
James Smart9940b972011-03-11 16:06:12 -050011786 if (lpfc_readl(phba->HCregaddr, &control))
11787 goto unplug_error;
dea31012005-04-17 16:05:31 -050011788 control &= ~HC_LAINT_ENA;
11789 writel(control, phba->HCregaddr);
11790 readl(phba->HCregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050011791 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011792 }
11793 else
11794 work_ha_copy &= ~HA_LATT;
11795 }
11796
James Smart93996272008-08-24 21:50:30 -040011797 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
James Smart858c9f62007-06-17 19:56:39 -050011798 /*
11799 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
11800 * the only slow ring.
11801 */
11802 status = (work_ha_copy &
11803 (HA_RXMASK << (4*LPFC_ELS_RING)));
11804 status >>= (4*LPFC_ELS_RING);
11805 if (status & HA_RXMASK) {
James Smart5b75da22008-12-04 22:39:35 -050011806 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -050011807 if (lpfc_readl(phba->HCregaddr, &control))
11808 goto unplug_error;
James Smarta58cbd52007-08-02 11:09:43 -040011809
11810 lpfc_debugfs_slow_ring_trc(phba,
11811 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
11812 control, status,
11813 (uint32_t)phba->sli.slistat.sli_intr);
11814
James Smart858c9f62007-06-17 19:56:39 -050011815 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
James Smarta58cbd52007-08-02 11:09:43 -040011816 lpfc_debugfs_slow_ring_trc(phba,
11817 "ISR Disable ring:"
11818 "pwork:x%x hawork:x%x wait:x%x",
11819 phba->work_ha, work_ha_copy,
11820 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040011821 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040011822
James Smart858c9f62007-06-17 19:56:39 -050011823 control &=
11824 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea31012005-04-17 16:05:31 -050011825 writel(control, phba->HCregaddr);
11826 readl(phba->HCregaddr); /* flush */
dea31012005-04-17 16:05:31 -050011827 }
James Smarta58cbd52007-08-02 11:09:43 -040011828 else {
11829 lpfc_debugfs_slow_ring_trc(phba,
11830 "ISR slow ring: pwork:"
11831 "x%x hawork:x%x wait:x%x",
11832 phba->work_ha, work_ha_copy,
11833 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040011834 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040011835 }
James Smart5b75da22008-12-04 22:39:35 -050011836 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011837 }
11838 }
James Smart5b75da22008-12-04 22:39:35 -050011839 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040011840 if (work_ha_copy & HA_ERATT) {
James Smart9940b972011-03-11 16:06:12 -050011841 if (lpfc_sli_read_hs(phba))
11842 goto unplug_error;
James Smarta257bf92009-04-06 18:48:10 -040011843 /*
11844 * Check if there is a deferred error condition
11845 * is active
11846 */
11847 if ((HS_FFER1 & phba->work_hs) &&
11848 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -040011849 HS_FFER6 | HS_FFER7 | HS_FFER8) &
11850 phba->work_hs)) {
James Smarta257bf92009-04-06 18:48:10 -040011851 phba->hba_flag |= DEFER_ERATT;
11852 /* Clear all interrupt enable conditions */
11853 writel(0, phba->HCregaddr);
11854 readl(phba->HCregaddr);
11855 }
11856 }
11857
James Smart93996272008-08-24 21:50:30 -040011858 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
James Smart92d7f7b2007-06-17 19:56:38 -050011859 pmb = phba->sli.mbox_active;
James Smart04c68492009-05-22 14:52:52 -040011860 pmbox = &pmb->u.mb;
James Smart34b02dc2008-08-24 21:49:55 -040011861 mbox = phba->mbox;
James Smart858c9f62007-06-17 19:56:39 -050011862 vport = pmb->vport;
James Smart92d7f7b2007-06-17 19:56:38 -050011863
11864 /* First check out the status word */
11865 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11866 if (pmbox->mbxOwner != OWN_HOST) {
James Smart5b75da22008-12-04 22:39:35 -050011867 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -050011868 /*
11869 * Stray Mailbox Interrupt, mbxCommand <cmd>
11870 * mbxStatus <status>
11871 */
James Smart09372822008-01-11 01:52:54 -050011872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
James Smart92d7f7b2007-06-17 19:56:38 -050011873 LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011874 "(%d):0304 Stray Mailbox "
James Smart92d7f7b2007-06-17 19:56:38 -050011875 "Interrupt mbxCommand x%x "
11876 "mbxStatus x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011877 (vport ? vport->vpi : 0),
James Smart92d7f7b2007-06-17 19:56:38 -050011878 pmbox->mbxCommand,
11879 pmbox->mbxStatus);
James Smart09372822008-01-11 01:52:54 -050011880 /* clear mailbox attention bit */
11881 work_ha_copy &= ~HA_MBATT;
11882 } else {
James Smart97eab632008-04-07 10:16:05 -040011883 phba->sli.mbox_active = NULL;
James Smart5b75da22008-12-04 22:39:35 -050011884 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart09372822008-01-11 01:52:54 -050011885 phba->last_completion_time = jiffies;
11886 del_timer(&phba->sli.mbox_tmo);
James Smart09372822008-01-11 01:52:54 -050011887 if (pmb->mbox_cmpl) {
11888 lpfc_sli_pcimem_bcopy(mbox, pmbox,
11889 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -040011890 if (pmb->out_ext_byte_len &&
11891 pmb->context2)
11892 lpfc_sli_pcimem_bcopy(
11893 phba->mbox_ext,
11894 pmb->context2,
11895 pmb->out_ext_byte_len);
James Smart858c9f62007-06-17 19:56:39 -050011896 }
James Smart09372822008-01-11 01:52:54 -050011897 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11898 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11899
11900 lpfc_debugfs_disc_trc(vport,
11901 LPFC_DISC_TRC_MBOX_VPORT,
11902 "MBOX dflt rpi: : "
11903 "status:x%x rpi:x%x",
11904 (uint32_t)pmbox->mbxStatus,
11905 pmbox->un.varWords[0], 0);
11906
11907 if (!pmbox->mbxStatus) {
11908 mp = (struct lpfc_dmabuf *)
11909 (pmb->context1);
11910 ndlp = (struct lpfc_nodelist *)
11911 pmb->context2;
11912
11913 /* Reg_LOGIN of dflt RPI was
11914 					 * successful. Now let's get
11915 * rid of the RPI using the
11916 * same mbox buffer.
11917 */
11918 lpfc_unreg_login(phba,
11919 vport->vpi,
11920 pmbox->un.varWords[0],
11921 pmb);
11922 pmb->mbox_cmpl =
11923 lpfc_mbx_cmpl_dflt_rpi;
11924 pmb->context1 = mp;
11925 pmb->context2 = ndlp;
11926 pmb->vport = vport;
James Smart58da1ff2008-04-07 10:15:56 -040011927 rc = lpfc_sli_issue_mbox(phba,
11928 pmb,
11929 MBX_NOWAIT);
11930 if (rc != MBX_BUSY)
11931 lpfc_printf_log(phba,
11932 KERN_ERR,
11933 LOG_MBOX | LOG_SLI,
James Smartd7c255b2008-08-24 21:50:00 -040011934 						"0350 rc should have "
James Smart6a9c52c2009-10-02 15:16:51 -040011935 "been MBX_BUSY\n");
James Smart3772a992009-05-22 14:50:54 -040011936 if (rc != MBX_NOT_FINISHED)
11937 goto send_current_mbox;
James Smart09372822008-01-11 01:52:54 -050011938 }
11939 }
James Smart5b75da22008-12-04 22:39:35 -050011940 spin_lock_irqsave(
11941 &phba->pport->work_port_lock,
11942 iflag);
James Smart09372822008-01-11 01:52:54 -050011943 phba->pport->work_port_events &=
11944 ~WORKER_MBOX_TMO;
James Smart5b75da22008-12-04 22:39:35 -050011945 spin_unlock_irqrestore(
11946 &phba->pport->work_port_lock,
11947 iflag);
James Smart09372822008-01-11 01:52:54 -050011948 lpfc_mbox_cmpl_put(phba, pmb);
James Smart858c9f62007-06-17 19:56:39 -050011949 }
James Smart97eab632008-04-07 10:16:05 -040011950 } else
James Smart5b75da22008-12-04 22:39:35 -050011951 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040011952
James Smart92d7f7b2007-06-17 19:56:38 -050011953 if ((work_ha_copy & HA_MBATT) &&
11954 (phba->sli.mbox_active == NULL)) {
James Smart858c9f62007-06-17 19:56:39 -050011955send_current_mbox:
James Smart92d7f7b2007-06-17 19:56:38 -050011956 /* Process next mailbox command if there is one */
James Smart58da1ff2008-04-07 10:15:56 -040011957 do {
11958 rc = lpfc_sli_issue_mbox(phba, NULL,
11959 MBX_NOWAIT);
11960 } while (rc == MBX_NOT_FINISHED);
11961 if (rc != MBX_SUCCESS)
11962 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11963 LOG_SLI, "0349 rc should be "
James Smart6a9c52c2009-10-02 15:16:51 -040011964 "MBX_SUCCESS\n");
James Smart92d7f7b2007-06-17 19:56:38 -050011965 }
11966
James Smart5b75da22008-12-04 22:39:35 -050011967 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011968 phba->work_ha |= work_ha_copy;
James Smart5b75da22008-12-04 22:39:35 -050011969 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart5e9d9b82008-06-14 22:52:53 -040011970 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -050011971 }
James Smart93996272008-08-24 21:50:30 -040011972 return IRQ_HANDLED;
James Smart9940b972011-03-11 16:06:12 -050011973unplug_error:
11974 spin_unlock_irqrestore(&phba->hbalock, iflag);
11975 return IRQ_HANDLED;
dea31012005-04-17 16:05:31 -050011976
James Smart3772a992009-05-22 14:50:54 -040011977} /* lpfc_sli_sp_intr_handler */
James Smart93996272008-08-24 21:50:30 -040011978
11979/**
James Smart3772a992009-05-22 14:50:54 -040011980 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
James Smart93996272008-08-24 21:50:30 -040011981 * @irq: Interrupt number.
11982 * @dev_id: The device context pointer.
11983 *
11984 * This function is directly called from the PCI layer as an interrupt
James Smart3772a992009-05-22 14:50:54 -040011985 * service routine when device with SLI-3 interface spec is enabled with
11986 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11987 * ring event in the HBA. However, when the device is enabled with either
11988 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11989 * device-level interrupt handler. When the PCI slot is in error recovery
11990 * or the HBA is undergoing initialization, the interrupt handler will not
11991 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11992 * the interrupt context. This function is called without any lock held.
11993 * It gets the hbalock to access and update SLI data structures.
James Smart93996272008-08-24 21:50:30 -040011994 *
11995 * This function returns IRQ_HANDLED when interrupt is handled else it
11996 * returns IRQ_NONE.
11997 **/
11998irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040011999lpfc_sli_fp_intr_handler(int irq, void *dev_id)
James Smart93996272008-08-24 21:50:30 -040012000{
12001 struct lpfc_hba *phba;
12002 uint32_t ha_copy;
12003 unsigned long status;
James Smart5b75da22008-12-04 22:39:35 -050012004 unsigned long iflag;
James Smart895427b2017-02-12 13:52:30 -080012005 struct lpfc_sli_ring *pring;
James Smart93996272008-08-24 21:50:30 -040012006
12007 /* Get the driver's phba structure from the dev_id and
12008 * assume the HBA is not interrupting.
12009 */
12010 phba = (struct lpfc_hba *) dev_id;
12011
12012 if (unlikely(!phba))
12013 return IRQ_NONE;
dea31012005-04-17 16:05:31 -050012014
12015 /*
James Smart93996272008-08-24 21:50:30 -040012016 	 * Stuff needs to be attended to when this function is invoked as an
12017 * individual interrupt handler in MSI-X multi-message interrupt mode
dea31012005-04-17 16:05:31 -050012018 */
James Smart93996272008-08-24 21:50:30 -040012019 if (phba->intr_type == MSIX) {
James Smart3772a992009-05-22 14:50:54 -040012020 /* Check device state for handling interrupt */
12021 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040012022 return IRQ_NONE;
12023 /* Need to read HA REG for FCP ring and other ring events */
James Smart9940b972011-03-11 16:06:12 -050012024 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12025 return IRQ_HANDLED;
James Smart93996272008-08-24 21:50:30 -040012026 /* Clear up only attention source related to fast-path */
James Smart5b75da22008-12-04 22:39:35 -050012027 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040012028 /*
12029 * If there is deferred error attention, do not check for
12030 * any interrupt.
12031 */
12032 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
James Smart3772a992009-05-22 14:50:54 -040012033 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040012034 return IRQ_NONE;
12035 }
James Smart93996272008-08-24 21:50:30 -040012036 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12037 phba->HAregaddr);
12038 readl(phba->HAregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050012039 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040012040 } else
12041 ha_copy = phba->ha_copy;
12042
12043 /*
12044 * Process all events on FCP ring. Take the optimized path for FCP IO.
12045 */
12046 ha_copy &= ~(phba->work_ha_mask);
12047
12048 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea31012005-04-17 16:05:31 -050012049 status >>= (4*LPFC_FCP_RING);
James Smart895427b2017-02-12 13:52:30 -080012050 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
James Smart858c9f62007-06-17 19:56:39 -050012051 if (status & HA_RXMASK)
James Smart895427b2017-02-12 13:52:30 -080012052 lpfc_sli_handle_fast_ring_event(phba, pring, status);
James Smarta4bc3372006-12-02 13:34:16 -050012053
12054 if (phba->cfg_multi_ring_support == 2) {
12055 /*
James Smart93996272008-08-24 21:50:30 -040012056 * Process all events on extra ring. Take the optimized path
12057 * for extra ring IO.
James Smarta4bc3372006-12-02 13:34:16 -050012058 */
James Smart93996272008-08-24 21:50:30 -040012059 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
James Smarta4bc3372006-12-02 13:34:16 -050012060 status >>= (4*LPFC_EXTRA_RING);
James Smart858c9f62007-06-17 19:56:39 -050012061 if (status & HA_RXMASK) {
James Smarta4bc3372006-12-02 13:34:16 -050012062 lpfc_sli_handle_fast_ring_event(phba,
James Smart895427b2017-02-12 13:52:30 -080012063 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
James Smarta4bc3372006-12-02 13:34:16 -050012064 status);
12065 }
12066 }
dea31012005-04-17 16:05:31 -050012067 return IRQ_HANDLED;
James Smart3772a992009-05-22 14:50:54 -040012068} /* lpfc_sli_fp_intr_handler */
dea31012005-04-17 16:05:31 -050012069
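/*
 * Illustrative sketch (guarded out of the build): under MSI-X the slow-path
 * and fast-path handlers above are registered on separate vectors.  The use
 * of pci_irq_vector() and the name strings are assumptions for illustration;
 * the real driver keeps its own MSI-X bookkeeping elsewhere.
 */
#if 0
static int example_register_msix(struct lpfc_hba *phba)
{
	int rc;

	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 lpfc_sli_sp_intr_handler, 0, "lpfc-sp-example", phba);
	if (rc)
		return rc;
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 lpfc_sli_fp_intr_handler, 0, "lpfc-fp-example", phba);
	if (rc)
		free_irq(pci_irq_vector(phba->pcidev, 0), phba);
	return rc;
}
#endif
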
James Smart93996272008-08-24 21:50:30 -040012070/**
James Smart3772a992009-05-22 14:50:54 -040012071 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
James Smart93996272008-08-24 21:50:30 -040012072 * @irq: Interrupt number.
12073 * @dev_id: The device context pointer.
12074 *
James Smart3772a992009-05-22 14:50:54 -040012075 * This function is the HBA device-level interrupt handler to device with
12076 * SLI-3 interface spec, called from the PCI layer when either MSI or
12077 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12078 * requires driver attention. This function invokes the slow-path interrupt
12079 * attention handling function and fast-path interrupt attention handling
12080 * function in turn to process the relevant HBA attention events. This
12081 * function is called without any lock held. It gets the hbalock to access
12082 * and update SLI data structures.
James Smart93996272008-08-24 21:50:30 -040012083 *
12084 * This function returns IRQ_HANDLED when interrupt is handled, else it
12085 * returns IRQ_NONE.
12086 **/
12087irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040012088lpfc_sli_intr_handler(int irq, void *dev_id)
James Smart93996272008-08-24 21:50:30 -040012089{
12090 struct lpfc_hba *phba;
12091 irqreturn_t sp_irq_rc, fp_irq_rc;
12092 unsigned long status1, status2;
James Smarta747c9c2009-11-18 15:41:10 -050012093 uint32_t hc_copy;
James Smart93996272008-08-24 21:50:30 -040012094
12095 /*
12096 * Get the driver's phba structure from the dev_id and
12097 * assume the HBA is not interrupting.
12098 */
12099 phba = (struct lpfc_hba *) dev_id;
12100
12101 if (unlikely(!phba))
12102 return IRQ_NONE;
12103
James Smart3772a992009-05-22 14:50:54 -040012104 /* Check device state for handling interrupt */
12105 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040012106 return IRQ_NONE;
12107
12108 spin_lock(&phba->hbalock);
James Smart9940b972011-03-11 16:06:12 -050012109 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12110 spin_unlock(&phba->hbalock);
12111 return IRQ_HANDLED;
12112 }
12113
James Smart93996272008-08-24 21:50:30 -040012114 if (unlikely(!phba->ha_copy)) {
12115 spin_unlock(&phba->hbalock);
12116 return IRQ_NONE;
12117 } else if (phba->ha_copy & HA_ERATT) {
12118 if (phba->hba_flag & HBA_ERATT_HANDLED)
12119 /* ERATT polling has handled ERATT */
12120 phba->ha_copy &= ~HA_ERATT;
12121 else
12122 /* Indicate interrupt handler handles ERATT */
12123 phba->hba_flag |= HBA_ERATT_HANDLED;
12124 }
12125
James Smarta257bf92009-04-06 18:48:10 -040012126 /*
12127 * If there is deferred error attention, do not check for any interrupt.
12128 */
12129 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
Dan Carpenterec21b3b2010-08-08 00:15:17 +020012130 spin_unlock(&phba->hbalock);
James Smarta257bf92009-04-06 18:48:10 -040012131 return IRQ_NONE;
12132 }
12133
James Smart93996272008-08-24 21:50:30 -040012134 /* Clear attention sources except link and error attentions */
James Smart9940b972011-03-11 16:06:12 -050012135 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12136 spin_unlock(&phba->hbalock);
12137 return IRQ_HANDLED;
12138 }
James Smarta747c9c2009-11-18 15:41:10 -050012139 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12140 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12141 phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040012142 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
James Smarta747c9c2009-11-18 15:41:10 -050012143 writel(hc_copy, phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040012144 readl(phba->HAregaddr); /* flush */
12145 spin_unlock(&phba->hbalock);
12146
12147 /*
12148 * Invokes slow-path host attention interrupt handling as appropriate.
12149 */
12150
12151 /* status of events with mailbox and link attention */
12152 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12153
12154 /* status of events with ELS ring */
12155 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12156 status2 >>= (4*LPFC_ELS_RING);
12157
12158 if (status1 || (status2 & HA_RXMASK))
James Smart3772a992009-05-22 14:50:54 -040012159 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
James Smart93996272008-08-24 21:50:30 -040012160 else
12161 sp_irq_rc = IRQ_NONE;
12162
12163 /*
12164 * Invoke fast-path host attention interrupt handling as appropriate.
12165 */
12166
12167 /* status of events with FCP ring */
12168 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12169 status1 >>= (4*LPFC_FCP_RING);
12170
12171 /* status of events with extra ring */
12172 if (phba->cfg_multi_ring_support == 2) {
12173 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12174 status2 >>= (4*LPFC_EXTRA_RING);
12175 } else
12176 status2 = 0;
12177
12178 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
James Smart3772a992009-05-22 14:50:54 -040012179 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
James Smart93996272008-08-24 21:50:30 -040012180 else
12181 fp_irq_rc = IRQ_NONE;
12182
12183 /* Return device-level interrupt handling status */
12184 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
James Smart3772a992009-05-22 14:50:54 -040012185} /* lpfc_sli_intr_handler */
James Smart4f774512009-05-22 14:52:35 -040012186
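/*
 * Illustrative sketch (guarded out of the build): with INTx or single-message
 * MSI the device-level lpfc_sli_intr_handler() above is the one hooked up via
 * request_irq(); the flags and name string here are illustrative assumptions.
 */
#if 0
static int example_register_intx(struct lpfc_hba *phba)
{
	return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			   IRQF_SHARED, "lpfc-example", phba);
}
#endif
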
12187/**
12188 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12189 * @phba: pointer to lpfc hba data structure.
12190 *
12191 * This routine is invoked by the worker thread to process all the pending
12192 * SLI4 FCP abort XRI events.
12193 **/
12194void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12195{
12196 struct lpfc_cq_event *cq_event;
12197
12198 /* First, declare the fcp xri abort event has been handled */
12199 spin_lock_irq(&phba->hbalock);
12200 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12201 spin_unlock_irq(&phba->hbalock);
12202 /* Now, handle all the fcp xri abort events */
12203 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12204 /* Get the first event from the head of the event queue */
12205 spin_lock_irq(&phba->hbalock);
12206 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12207 cq_event, struct lpfc_cq_event, list);
12208 spin_unlock_irq(&phba->hbalock);
12209 /* Notify aborted XRI for FCP work queue */
12210 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12211 /* Free the event processed back to the free pool */
12212 lpfc_sli4_cq_event_release(phba, cq_event);
12213 }
12214}
12215
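/*
 * Illustrative sketch (guarded out of the build): the producer side of the
 * queue drained above.  The CQ handling path wraps the aborted-XRI WCQE in a
 * cq_event, queues it, sets FCP_XRI_ABORT_EVENT and wakes the worker.  The
 * function name is hypothetical and the WCQE type name is assumed from the
 * driver's SLI4 definitions.
 */
#if 0
static bool example_queue_fcp_xri_abort(struct lpfc_hba *phba,
					struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event)
		return false;
	memcpy(&cq_event->cqe.wcqe_axri, axri, sizeof(*axri));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list,
		      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	phba->hba_flag |= FCP_XRI_ABORT_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_worker_wake_up(phba);
	return true;
}
#endif
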
12216/**
James Smart318083a2017-03-04 09:30:30 -080012217 * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
12218 * @phba: pointer to lpfc hba data structure.
12219 *
12220 * This routine is invoked by the worker thread to process all the pending
12221 * SLI4 NVME abort XRI events.
12222 **/
12223void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
12224{
12225 struct lpfc_cq_event *cq_event;
12226
12227 	/* First, declare the nvme xri abort event has been handled */
12228 spin_lock_irq(&phba->hbalock);
12229 phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
12230 spin_unlock_irq(&phba->hbalock);
12231 	/* Now, handle all the nvme xri abort events */
12232 while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
12233 /* Get the first event from the head of the event queue */
12234 spin_lock_irq(&phba->hbalock);
12235 list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
12236 cq_event, struct lpfc_cq_event, list);
12237 spin_unlock_irq(&phba->hbalock);
12238 /* Notify aborted XRI for NVME work queue */
12239 if (phba->nvmet_support) {
12240 lpfc_sli4_nvmet_xri_aborted(phba,
12241 &cq_event->cqe.wcqe_axri);
12242 } else {
12243 lpfc_sli4_nvme_xri_aborted(phba,
12244 &cq_event->cqe.wcqe_axri);
12245 }
12246 /* Free the event processed back to the free pool */
12247 lpfc_sli4_cq_event_release(phba, cq_event);
12248 }
12249}
12250
12251/**
James Smart4f774512009-05-22 14:52:35 -040012252 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12253 * @phba: pointer to lpfc hba data structure.
12254 *
12255 * This routine is invoked by the worker thread to process all the pending
12256 * SLI4 els abort xri events.
12257 **/
12258void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12259{
12260 struct lpfc_cq_event *cq_event;
12261
12262 /* First, declare the els xri abort event has been handled */
12263 spin_lock_irq(&phba->hbalock);
12264 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12265 spin_unlock_irq(&phba->hbalock);
12266 /* Now, handle all the els xri abort events */
12267 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12268 /* Get the first event from the head of the event queue */
12269 spin_lock_irq(&phba->hbalock);
12270 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12271 cq_event, struct lpfc_cq_event, list);
12272 spin_unlock_irq(&phba->hbalock);
12273 /* Notify aborted XRI for ELS work queue */
12274 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12275 /* Free the event processed back to the free pool */
12276 lpfc_sli4_cq_event_release(phba, cq_event);
12277 }
12278}
12279
James Smart341af102010-01-26 23:07:37 -050012280/**
12281 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12282 * @phba: pointer to lpfc hba data structure
12283 * @pIocbIn: pointer to the rspiocbq
12284 * @pIocbOut: pointer to the cmdiocbq
12285 * @wcqe: pointer to the complete wcqe
12286 *
12287 * This routine transfers the fields of a command iocbq to a response iocbq
12288 * by copying all the IOCB fields from command iocbq and transferring the
12289 * completion status information from the complete wcqe.
12290 **/
James Smart4f774512009-05-22 14:52:35 -040012291static void
James Smart341af102010-01-26 23:07:37 -050012292lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12293 struct lpfc_iocbq *pIocbIn,
James Smart4f774512009-05-22 14:52:35 -040012294 struct lpfc_iocbq *pIocbOut,
12295 struct lpfc_wcqe_complete *wcqe)
12296{
James Smartaf227412013-10-10 12:23:10 -040012297 int numBdes, i;
James Smart341af102010-01-26 23:07:37 -050012298 unsigned long iflags;
James Smartaf227412013-10-10 12:23:10 -040012299 uint32_t status, max_response;
12300 struct lpfc_dmabuf *dmabuf;
12301 struct ulp_bde64 *bpl, bde;
James Smart4f774512009-05-22 14:52:35 -040012302 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12303
12304 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12305 sizeof(struct lpfc_iocbq) - offset);
James Smart4f774512009-05-22 14:52:35 -040012306 /* Map WCQE parameters into irspiocb parameters */
James Smartacd68592012-01-18 16:25:09 -050012307 status = bf_get(lpfc_wcqe_c_status, wcqe);
12308 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
James Smart4f774512009-05-22 14:52:35 -040012309 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12310 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12311 pIocbIn->iocb.un.fcpi.fcpi_parm =
12312 pIocbOut->iocb.un.fcpi.fcpi_parm -
12313 wcqe->total_data_placed;
12314 else
12315 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
James Smart695a8142010-01-26 23:08:03 -050012316 else {
James Smart4f774512009-05-22 14:52:35 -040012317 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
James Smartaf227412013-10-10 12:23:10 -040012318 switch (pIocbOut->iocb.ulpCommand) {
12319 case CMD_ELS_REQUEST64_CR:
12320 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12321 bpl = (struct ulp_bde64 *)dmabuf->virt;
12322 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12323 max_response = bde.tus.f.bdeSize;
12324 break;
12325 case CMD_GEN_REQUEST64_CR:
12326 max_response = 0;
12327 if (!pIocbOut->context3)
12328 break;
12329 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12330 sizeof(struct ulp_bde64);
12331 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12332 bpl = (struct ulp_bde64 *)dmabuf->virt;
12333 for (i = 0; i < numBdes; i++) {
12334 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12335 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12336 max_response += bde.tus.f.bdeSize;
12337 }
12338 break;
12339 default:
12340 max_response = wcqe->total_data_placed;
12341 break;
12342 }
12343 if (max_response < wcqe->total_data_placed)
12344 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12345 else
12346 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12347 wcqe->total_data_placed;
James Smart695a8142010-01-26 23:08:03 -050012348 }
James Smart341af102010-01-26 23:07:37 -050012349
James Smartacd68592012-01-18 16:25:09 -050012350 /* Convert BG errors for completion status */
12351 if (status == CQE_STATUS_DI_ERROR) {
12352 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12353
12354 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12355 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12356 else
12357 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12358
12359 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12360 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12361 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12362 BGS_GUARD_ERR_MASK;
12363 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12364 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12365 BGS_APPTAG_ERR_MASK;
12366 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12367 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12368 BGS_REFTAG_ERR_MASK;
12369
12370 /* Check to see if there was any good data before the error */
12371 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12372 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12373 BGS_HI_WATER_MARK_PRESENT_MASK;
12374 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12375 wcqe->total_data_placed;
12376 }
12377
12378 /*
12379 * Set ALL the error bits to indicate we don't know what
12380 * type of error it is.
12381 */
12382 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12383 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12384 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12385 BGS_GUARD_ERR_MASK);
12386 }
12387
James Smart341af102010-01-26 23:07:37 -050012388 /* Pick up HBA exchange busy condition */
12389 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12390 spin_lock_irqsave(&phba->hbalock, iflags);
12391 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12392 spin_unlock_irqrestore(&phba->hbalock, iflags);
12393 }
James Smart4f774512009-05-22 14:52:35 -040012394}
12395
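/*
 * Illustrative sketch (guarded out of the build): a hypothetical completion
 * callback showing how the fields filled in by lpfc_sli4_iocb_param_transfer()
 * are typically consumed by the issuing path.
 */
#if 0
static void example_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
	    irsp->un.ulpWord[4] == IOERR_RX_DMA_FAILED) {
		/* DI (BlockGuard) error mapped to a local-reject/DMA status */
		return;
	}
	/* Otherwise ulpStatus/un.ulpWord[4] reflect the WCQE completion */
}
#endif
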
12396/**
James Smart45ed1192009-10-02 15:17:02 -040012397 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12398 * @phba: Pointer to HBA context object.
12399 * @irspiocbq: Pointer to the response IOCBQ whose cq_event carries the ELS WCQE.
12400 *
12401 * This routine handles an ELS work-queue completion event and constructs
12402 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12403 * discovery engine to handle.
12404 *
12405 * Return: Pointer to the response IOCBQ, NULL otherwise.
12406 **/
12407static struct lpfc_iocbq *
12408lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12409 struct lpfc_iocbq *irspiocbq)
12410{
James Smart895427b2017-02-12 13:52:30 -080012411 struct lpfc_sli_ring *pring;
James Smart45ed1192009-10-02 15:17:02 -040012412 struct lpfc_iocbq *cmdiocbq;
12413 struct lpfc_wcqe_complete *wcqe;
12414 unsigned long iflags;
12415
James Smart895427b2017-02-12 13:52:30 -080012416 pring = lpfc_phba_elsring(phba);
12417
James Smart45ed1192009-10-02 15:17:02 -040012418 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
James Smart7e56aa22012-08-03 12:35:34 -040012419 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart45ed1192009-10-02 15:17:02 -040012420 pring->stats.iocb_event++;
12421 /* Look up the ELS command IOCB and create pseudo response IOCB */
12422 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12423 bf_get(lpfc_wcqe_c_request_tag, wcqe));
James Smart89533e92016-10-13 15:06:15 -070012424 /* Put the iocb back on the txcmplq */
12425 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
James Smart7e56aa22012-08-03 12:35:34 -040012426 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart45ed1192009-10-02 15:17:02 -040012427
12428 if (unlikely(!cmdiocbq)) {
12429 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12430 "0386 ELS complete with no corresponding "
12431 "cmdiocb: iotag (%d)\n",
12432 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12433 lpfc_sli_release_iocbq(phba, irspiocbq);
12434 return NULL;
12435 }
12436
12437 /* Fake the irspiocbq and copy necessary response information */
James Smart341af102010-01-26 23:07:37 -050012438 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
James Smart45ed1192009-10-02 15:17:02 -040012439
12440 return irspiocbq;
12441}
12442
12443/**
James Smart04c68492009-05-22 14:52:52 -040012444 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12445 * @phba: Pointer to HBA context object.
12446 * @mcqe: Pointer to mailbox completion queue entry.
12447 *
12448 * This routine processes a mailbox completion queue entry with an
12449 * asynchronous event.
12450 *
12451 * Return: true if work posted to worker thread, otherwise false.
12452 **/
12453static bool
12454lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12455{
12456 struct lpfc_cq_event *cq_event;
12457 unsigned long iflags;
12458
12459 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12460 "0392 Async Event: word0:x%x, word1:x%x, "
12461 "word2:x%x, word3:x%x\n", mcqe->word0,
12462 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12463
12464 /* Allocate a new internal CQ_EVENT entry */
12465 cq_event = lpfc_sli4_cq_event_alloc(phba);
12466 if (!cq_event) {
12467 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12468 "0394 Failed to allocate CQ_EVENT entry\n");
12469 return false;
12470 }
12471
12472 /* Move the CQE into an asynchronous event entry */
12473 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
12474 spin_lock_irqsave(&phba->hbalock, iflags);
12475 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12476 /* Set the async event flag */
12477 phba->hba_flag |= ASYNC_EVENT;
12478 spin_unlock_irqrestore(&phba->hbalock, iflags);
12479
12480 return true;
12481}
12482
12483/**
12484 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12485 * @phba: Pointer to HBA context object.
12486 * @mcqe: Pointer to mailbox completion queue entry.
12487 *
12488 * This routine processes a mailbox completion queue entry with a mailbox
12489 * completion event.
12490 *
12491 * Return: true if work posted to worker thread, otherwise false.
12492 **/
12493static bool
12494lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12495{
12496 uint32_t mcqe_status;
12497 MAILBOX_t *mbox, *pmbox;
12498 struct lpfc_mqe *mqe;
12499 struct lpfc_vport *vport;
12500 struct lpfc_nodelist *ndlp;
12501 struct lpfc_dmabuf *mp;
12502 unsigned long iflags;
12503 LPFC_MBOXQ_t *pmb;
12504 bool workposted = false;
12505 int rc;
12506
12507 	/* Not a mailbox completion MCQE: bail out via the consumed-bit check */
12508 if (!bf_get(lpfc_trailer_completed, mcqe))
12509 goto out_no_mqe_complete;
12510
12511 /* Get the reference to the active mbox command */
12512 spin_lock_irqsave(&phba->hbalock, iflags);
12513 pmb = phba->sli.mbox_active;
12514 if (unlikely(!pmb)) {
12515 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12516 "1832 No pending MBOX command to handle\n");
12517 spin_unlock_irqrestore(&phba->hbalock, iflags);
12518 goto out_no_mqe_complete;
12519 }
12520 spin_unlock_irqrestore(&phba->hbalock, iflags);
12521 mqe = &pmb->u.mqe;
12522 pmbox = (MAILBOX_t *)&pmb->u.mqe;
12523 mbox = phba->mbox;
12524 vport = pmb->vport;
12525
12526 /* Reset heartbeat timer */
12527 phba->last_completion_time = jiffies;
12528 del_timer(&phba->sli.mbox_tmo);
12529
12530 /* Move mbox data to caller's mailbox region, do endian swapping */
12531 if (pmb->mbox_cmpl && mbox)
12532 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
James Smart04c68492009-05-22 14:52:52 -040012533
James Smart73d91e52011-10-10 21:32:10 -040012534 /*
12535 * For mcqe errors, conditionally move a modified error code to
12536 * the mbox so that the error will not be missed.
12537 */
12538 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12539 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12540 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12541 bf_set(lpfc_mqe_status, mqe,
12542 (LPFC_MBX_ERROR_RANGE | mcqe_status));
12543 }
James Smart04c68492009-05-22 14:52:52 -040012544 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12545 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12546 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12547 "MBOX dflt rpi: status:x%x rpi:x%x",
12548 mcqe_status,
12549 pmbox->un.varWords[0], 0);
12550 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12551 mp = (struct lpfc_dmabuf *)(pmb->context1);
12552 ndlp = (struct lpfc_nodelist *)pmb->context2;
12553 			/* Reg_LOGIN of dflt RPI was successful. Now let's get
12554 			 * rid of the RPI using the same mbox buffer.
12555 */
12556 lpfc_unreg_login(phba, vport->vpi,
12557 pmbox->un.varWords[0], pmb);
12558 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12559 pmb->context1 = mp;
12560 pmb->context2 = ndlp;
12561 pmb->vport = vport;
12562 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12563 if (rc != MBX_BUSY)
12564 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12565 LOG_SLI, "0385 rc should "
12566 "have been MBX_BUSY\n");
12567 if (rc != MBX_NOT_FINISHED)
12568 goto send_current_mbox;
12569 }
12570 }
12571 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12572 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12573 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12574
12575 /* There is mailbox completion work to do */
12576 spin_lock_irqsave(&phba->hbalock, iflags);
12577 __lpfc_mbox_cmpl_put(phba, pmb);
12578 phba->work_ha |= HA_MBATT;
12579 spin_unlock_irqrestore(&phba->hbalock, iflags);
12580 workposted = true;
12581
12582send_current_mbox:
12583 spin_lock_irqsave(&phba->hbalock, iflags);
12584 /* Release the mailbox command posting token */
12585 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12586	/* Setting the active mailbox pointer needs to be in sync with the flag clear */
12587 phba->sli.mbox_active = NULL;
12588 spin_unlock_irqrestore(&phba->hbalock, iflags);
12589 /* Wake up worker thread to post the next pending mailbox command */
12590 lpfc_worker_wake_up(phba);
12591out_no_mqe_complete:
12592 if (bf_get(lpfc_trailer_consumed, mcqe))
12593 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12594 return workposted;
12595}
12596
12597/**
12598 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12599 * @phba: Pointer to HBA context object.
12600 * @cqe: Pointer to mailbox completion queue entry.
12601 *
12602 * This routine processes a mailbox completion queue entry; it invokes the
12603 * proper mailbox completion handling or asynchronous event handling routine
12604 * according to the MCQE's async bit.
12605 *
12606 * Return: true if work posted to worker thread, otherwise false.
12607 **/
12608static bool
12609lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12610{
12611 struct lpfc_mcqe mcqe;
12612 bool workposted;
12613
12614 /* Copy the mailbox MCQE and convert endian order as needed */
12615 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12616
12617 /* Invoke the proper event handling routine */
12618 if (!bf_get(lpfc_trailer_async, &mcqe))
12619 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12620 else
12621 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12622 return workposted;
12623}
12624
12625/**
James Smart4f774512009-05-22 14:52:35 -040012626 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12627 * @phba: Pointer to HBA context object.
James Smart2a76a282012-08-03 12:35:54 -040012628 * @cq: Pointer to associated CQ
James Smart4f774512009-05-22 14:52:35 -040012629 * @wcqe: Pointer to work-queue completion queue entry.
12630 *
12631 * This routine handles an ELS work-queue completion event.
12632 *
12633 * Return: true if work posted to worker thread, otherwise false.
12634 **/
12635static bool
James Smart2a76a282012-08-03 12:35:54 -040012636lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040012637 struct lpfc_wcqe_complete *wcqe)
12638{
James Smart4f774512009-05-22 14:52:35 -040012639 struct lpfc_iocbq *irspiocbq;
12640 unsigned long iflags;
James Smart2a76a282012-08-03 12:35:54 -040012641 struct lpfc_sli_ring *pring = cq->pring;
James Smart0e9bb8d2013-03-01 16:35:12 -050012642 int txq_cnt = 0;
12643 int txcmplq_cnt = 0;
12644 int fcp_txcmplq_cnt = 0;
James Smart4f774512009-05-22 14:52:35 -040012645
James Smart45ed1192009-10-02 15:17:02 -040012646 /* Get an irspiocbq for later ELS response processing use */
James Smart4f774512009-05-22 14:52:35 -040012647 irspiocbq = lpfc_sli_get_iocbq(phba);
12648 if (!irspiocbq) {
James Smart0e9bb8d2013-03-01 16:35:12 -050012649 if (!list_empty(&pring->txq))
12650 txq_cnt++;
12651 if (!list_empty(&pring->txcmplq))
12652 txcmplq_cnt++;
James Smart4f774512009-05-22 14:52:35 -040012653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart2a9bf3d2010-06-07 15:24:45 -040012654 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12655 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
James Smart0e9bb8d2013-03-01 16:35:12 -050012656 txq_cnt, phba->iocb_cnt,
12657 fcp_txcmplq_cnt,
12658 txcmplq_cnt);
James Smart45ed1192009-10-02 15:17:02 -040012659 return false;
James Smart4f774512009-05-22 14:52:35 -040012660 }
James Smart4f774512009-05-22 14:52:35 -040012661
James Smart45ed1192009-10-02 15:17:02 -040012662 /* Save off the slow-path queue event for work thread to process */
12663 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
James Smart4f774512009-05-22 14:52:35 -040012664 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart4d9ab992009-10-02 15:16:39 -040012665 list_add_tail(&irspiocbq->cq_event.list,
James Smart45ed1192009-10-02 15:17:02 -040012666 &phba->sli4_hba.sp_queue_event);
12667 phba->hba_flag |= HBA_SP_QUEUE_EVT;
James Smart4f774512009-05-22 14:52:35 -040012668 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart4f774512009-05-22 14:52:35 -040012669
James Smart45ed1192009-10-02 15:17:02 -040012670 return true;
James Smart4f774512009-05-22 14:52:35 -040012671}
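/*
 * A minimal sketch of the deferral pattern used above (illustrative only,
 * not additional driver logic): the CQE is copied into a container that
 * embeds a struct lpfc_cq_event, the container is queued on
 * sp_queue_event, HBA_SP_QUEUE_EVT is set, and the caller wakes the worker
 * thread.  The slow-path receive handler below uses the same shape with an
 * hbq_dmabuf instead of an iocbq ("container" is a placeholder name):
 *
 *	memcpy(&container->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&container->cq_event.list,
 *		      &phba->sli4_hba.sp_queue_event);
 *	phba->hba_flag |= HBA_SP_QUEUE_EVT;
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	return true;	(the caller then wakes the worker thread)
 */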
12672
12673/**
12674 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12675 * @phba: Pointer to HBA context object.
12676 * @wcqe: Pointer to work-queue completion queue entry.
12677 *
Masahiro Yamada3f8b6fb2017-02-27 14:29:25 -080012678 * This routine handles a slow-path WQ entry consumed event by invoking the
James Smart4f774512009-05-22 14:52:35 -040012679 * proper WQ release routine to the slow-path WQ.
12680 **/
12681static void
12682lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12683 struct lpfc_wcqe_release *wcqe)
12684{
James Smart2e90f4b2011-12-13 13:22:37 -050012685 /* sanity check on queue memory */
12686 if (unlikely(!phba->sli4_hba.els_wq))
12687 return;
James Smart4f774512009-05-22 14:52:35 -040012688 /* Check for the slow-path ELS work queue */
12689 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12690 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12691 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12692 else
12693 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12694 "2579 Slow-path wqe consume event carries "
12695 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12696 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12697 phba->sli4_hba.els_wq->queue_id);
12698}
12699
12700/**
12701 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
12702 * @phba: Pointer to HBA context object.
12703 * @cq: Pointer to a WQ completion queue.
12704 * @wcqe: Pointer to work-queue completion queue entry.
12705 *
12706 * This routine handles an XRI abort event.
12707 *
12708 * Return: true if work posted to worker thread, otherwise false.
12709 **/
12710static bool
12711lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12712 struct lpfc_queue *cq,
12713 struct sli4_wcqe_xri_aborted *wcqe)
12714{
12715 bool workposted = false;
12716 struct lpfc_cq_event *cq_event;
12717 unsigned long iflags;
12718
12719 /* Allocate a new internal CQ_EVENT entry */
12720 cq_event = lpfc_sli4_cq_event_alloc(phba);
12721 if (!cq_event) {
12722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12723 "0602 Failed to allocate CQ_EVENT entry\n");
12724 return false;
12725 }
12726
12727 /* Move the CQE into the proper xri abort event list */
12728 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12729 switch (cq->subtype) {
12730 case LPFC_FCP:
12731 spin_lock_irqsave(&phba->hbalock, iflags);
12732 list_add_tail(&cq_event->list,
12733 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12734 /* Set the fcp xri abort event flag */
12735 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12736 spin_unlock_irqrestore(&phba->hbalock, iflags);
12737 workposted = true;
12738 break;
12739 case LPFC_ELS:
12740 spin_lock_irqsave(&phba->hbalock, iflags);
12741 list_add_tail(&cq_event->list,
12742 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12743 /* Set the els xri abort event flag */
12744 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12745 spin_unlock_irqrestore(&phba->hbalock, iflags);
12746 workposted = true;
12747 break;
James Smart318083a2017-03-04 09:30:30 -080012748 case LPFC_NVME:
12749 spin_lock_irqsave(&phba->hbalock, iflags);
12750 list_add_tail(&cq_event->list,
12751 &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
12752 /* Set the nvme xri abort event flag */
12753 phba->hba_flag |= NVME_XRI_ABORT_EVENT;
12754 spin_unlock_irqrestore(&phba->hbalock, iflags);
12755 workposted = true;
12756 break;
James Smart4f774512009-05-22 14:52:35 -040012757 default:
12758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart318083a2017-03-04 09:30:30 -080012759 "0603 Invalid CQ subtype %d: "
12760 "%08x %08x %08x %08x\n",
12761 cq->subtype, wcqe->word0, wcqe->parameter,
12762 wcqe->word2, wcqe->word3);
12763 lpfc_sli4_cq_event_release(phba, cq_event);
James Smart4f774512009-05-22 14:52:35 -040012764 workposted = false;
12765 break;
12766 }
12767 return workposted;
12768}
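/*
 * Summary of the subtype routing above (descriptive note only):
 *
 *	LPFC_FCP   ->  sp_fcp_xri_aborted_work_queue,  FCP_XRI_ABORT_EVENT
 *	LPFC_ELS   ->  sp_els_xri_aborted_work_queue,  ELS_XRI_ABORT_EVENT
 *	LPFC_NVME  ->  sp_nvme_xri_aborted_work_queue, NVME_XRI_ABORT_EVENT
 *
 * In each case the aborted-XRI CQE is copied into a freshly allocated
 * lpfc_cq_event and the actual cleanup is left to the worker thread.
 */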
12769
12770/**
James Smart4d9ab992009-10-02 15:16:39 -040012771 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
James Smart4f774512009-05-22 14:52:35 -040012772 * @phba: Pointer to HBA context object.
James Smart4d9ab992009-10-02 15:16:39 -040012773 * @rcqe: Pointer to receive-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040012774 *
James Smart4d9ab992009-10-02 15:16:39 -040012775 * This routine processes a receive-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040012776 *
12777 * Return: true if work posted to worker thread, otherwise false.
12778 **/
12779static bool
James Smart4d9ab992009-10-02 15:16:39 -040012780lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12781{
12782 bool workposted = false;
James Smart895427b2017-02-12 13:52:30 -080012783 struct fc_frame_header *fc_hdr;
James Smart4d9ab992009-10-02 15:16:39 -040012784 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12785 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12786 struct hbq_dmabuf *dma_buf;
James Smart7851fe22011-07-22 18:36:52 -040012787 uint32_t status, rq_id;
James Smart4d9ab992009-10-02 15:16:39 -040012788 unsigned long iflags;
12789
James Smart2e90f4b2011-12-13 13:22:37 -050012790 /* sanity check on queue memory */
12791 if (unlikely(!hrq) || unlikely(!drq))
12792 return workposted;
12793
James Smart7851fe22011-07-22 18:36:52 -040012794 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12795 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12796 else
12797 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12798 if (rq_id != hrq->queue_id)
James Smart4d9ab992009-10-02 15:16:39 -040012799 goto out;
12800
12801 status = bf_get(lpfc_rcqe_status, rcqe);
12802 switch (status) {
12803 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12805 "2537 Receive Frame Truncated!!\n");
James Smartb84daac2012-08-03 12:35:13 -040012806 hrq->RQ_buf_trunc++;
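		/* fall through - a truncated frame is still queued below */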
James Smart4d9ab992009-10-02 15:16:39 -040012807 case FC_STATUS_RQ_SUCCESS:
James Smart5ffc2662009-11-18 15:39:44 -050012808 lpfc_sli4_rq_release(hrq, drq);
James Smart4d9ab992009-10-02 15:16:39 -040012809 spin_lock_irqsave(&phba->hbalock, iflags);
12810 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12811 if (!dma_buf) {
James Smartb84daac2012-08-03 12:35:13 -040012812 hrq->RQ_no_buf_found++;
James Smart4d9ab992009-10-02 15:16:39 -040012813 spin_unlock_irqrestore(&phba->hbalock, iflags);
12814 goto out;
12815 }
James Smartb84daac2012-08-03 12:35:13 -040012816 hrq->RQ_rcv_buf++;
James Smart4d9ab992009-10-02 15:16:39 -040012817 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
James Smart895427b2017-02-12 13:52:30 -080012818
12819		/* If an NVME LS event (type 0x28), treat it as Fast path */
12820 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
12821
James Smart4d9ab992009-10-02 15:16:39 -040012822		/* save off the frame for the work thread to process */
12823 list_add_tail(&dma_buf->cq_event.list,
James Smart45ed1192009-10-02 15:17:02 -040012824 &phba->sli4_hba.sp_queue_event);
James Smart4d9ab992009-10-02 15:16:39 -040012825 /* Frame received */
James Smart45ed1192009-10-02 15:17:02 -040012826 phba->hba_flag |= HBA_SP_QUEUE_EVT;
James Smart4d9ab992009-10-02 15:16:39 -040012827 spin_unlock_irqrestore(&phba->hbalock, iflags);
12828 workposted = true;
12829 break;
12830 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12831 case FC_STATUS_INSUFF_BUF_FRM_DISC:
James Smartb84daac2012-08-03 12:35:13 -040012832 hrq->RQ_no_posted_buf++;
James Smart4d9ab992009-10-02 15:16:39 -040012833 /* Post more buffers if possible */
12834 spin_lock_irqsave(&phba->hbalock, iflags);
12835 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12836 spin_unlock_irqrestore(&phba->hbalock, iflags);
12837 workposted = true;
12838 break;
12839 }
12840out:
12841 return workposted;
James Smart4d9ab992009-10-02 15:16:39 -040012842}
12843
12844/**
12845 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12846 * @phba: Pointer to HBA context object.
12847 * @cq: Pointer to the completion queue.
12848 * @cqe: Pointer to a completion queue entry.
12849 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030012850 * This routine processes a slow-path work-queue or receive-queue completion queue
James Smart4d9ab992009-10-02 15:16:39 -040012851 * entry.
12852 *
12853 * Return: true if work posted to worker thread, otherwise false.
12854 **/
12855static bool
12856lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040012857 struct lpfc_cqe *cqe)
12858{
James Smart45ed1192009-10-02 15:17:02 -040012859 struct lpfc_cqe cqevt;
James Smart4f774512009-05-22 14:52:35 -040012860 bool workposted = false;
12861
12862 /* Copy the work queue CQE and convert endian order if needed */
James Smart45ed1192009-10-02 15:17:02 -040012863 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
James Smart4f774512009-05-22 14:52:35 -040012864
12865 /* Check and process for different type of WCQE and dispatch */
James Smart45ed1192009-10-02 15:17:02 -040012866 switch (bf_get(lpfc_cqe_code, &cqevt)) {
James Smart4f774512009-05-22 14:52:35 -040012867 case CQE_CODE_COMPL_WQE:
James Smart45ed1192009-10-02 15:17:02 -040012868 /* Process the WQ/RQ complete event */
James Smartbc739052010-08-04 16:11:18 -040012869 phba->last_completion_time = jiffies;
James Smart2a76a282012-08-03 12:35:54 -040012870 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
James Smart45ed1192009-10-02 15:17:02 -040012871 (struct lpfc_wcqe_complete *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040012872 break;
12873 case CQE_CODE_RELEASE_WQE:
12874 /* Process the WQ release event */
12875 lpfc_sli4_sp_handle_rel_wcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040012876 (struct lpfc_wcqe_release *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040012877 break;
12878 case CQE_CODE_XRI_ABORTED:
12879 /* Process the WQ XRI abort event */
James Smartbc739052010-08-04 16:11:18 -040012880 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040012881 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
James Smart45ed1192009-10-02 15:17:02 -040012882 (struct sli4_wcqe_xri_aborted *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040012883 break;
James Smart4d9ab992009-10-02 15:16:39 -040012884 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -040012885 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -040012886 /* Process the RQ event */
James Smartbc739052010-08-04 16:11:18 -040012887 phba->last_completion_time = jiffies;
James Smart4d9ab992009-10-02 15:16:39 -040012888 workposted = lpfc_sli4_sp_handle_rcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040012889 (struct lpfc_rcqe *)&cqevt);
James Smart4d9ab992009-10-02 15:16:39 -040012890 break;
James Smart4f774512009-05-22 14:52:35 -040012891 default:
12892 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12893 "0388 Not a valid WCQE code: x%x\n",
James Smart45ed1192009-10-02 15:17:02 -040012894 bf_get(lpfc_cqe_code, &cqevt));
James Smart4f774512009-05-22 14:52:35 -040012895 break;
12896 }
12897 return workposted;
12898}
12899
12900/**
James Smart4f774512009-05-22 14:52:35 -040012901 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
12902 * @phba: Pointer to HBA context object.
12903 * @eqe: Pointer to fast-path event queue entry.
12904 *
12905 * This routine processes an event queue entry from the slow-path event queue.
12906 * It checks the MajorCode and MinorCode to determine whether this is a
12907 * completion event on a completion queue; if not, an error is logged and the
12908 * routine just returns. Otherwise, it finds the corresponding completion
12909 * queue, processes all the entries on that completion queue, rearms the
12910 * completion queue, and then returns.
12911 *
12912 **/
12913static void
James Smart67d12732012-08-03 12:36:13 -040012914lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12915 struct lpfc_queue *speq)
James Smart4f774512009-05-22 14:52:35 -040012916{
James Smart67d12732012-08-03 12:36:13 -040012917 struct lpfc_queue *cq = NULL, *childq;
James Smart4f774512009-05-22 14:52:35 -040012918 struct lpfc_cqe *cqe;
12919 bool workposted = false;
12920 int ecount = 0;
12921 uint16_t cqid;
12922
James Smart4f774512009-05-22 14:52:35 -040012923 /* Get the reference to the corresponding CQ */
James Smartcb5172e2010-03-15 11:25:07 -040012924 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
James Smart4f774512009-05-22 14:52:35 -040012925
James Smart4f774512009-05-22 14:52:35 -040012926 list_for_each_entry(childq, &speq->child_list, list) {
12927 if (childq->queue_id == cqid) {
12928 cq = childq;
12929 break;
12930 }
12931 }
12932 if (unlikely(!cq)) {
James Smart75baf692010-06-08 18:31:21 -040012933 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12934 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12935 "0365 Slow-path CQ identifier "
12936 "(%d) does not exist\n", cqid);
James Smart4f774512009-05-22 14:52:35 -040012937 return;
12938 }
12939
James Smart895427b2017-02-12 13:52:30 -080012940 /* Save EQ associated with this CQ */
12941 cq->assoc_qp = speq;
12942
James Smart4f774512009-05-22 14:52:35 -040012943 /* Process all the entries to the CQ */
12944 switch (cq->type) {
12945 case LPFC_MCQ:
12946 while ((cqe = lpfc_sli4_cq_get(cq))) {
12947 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
James Smart73d91e52011-10-10 21:32:10 -040012948 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040012949 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
James Smartb84daac2012-08-03 12:35:13 -040012950 cq->CQ_mbox++;
James Smart4f774512009-05-22 14:52:35 -040012951 }
12952 break;
12953 case LPFC_WCQ:
12954 while ((cqe = lpfc_sli4_cq_get(cq))) {
James Smart895427b2017-02-12 13:52:30 -080012955 if ((cq->subtype == LPFC_FCP) ||
12956 (cq->subtype == LPFC_NVME))
12957 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
James Smart05580562011-05-24 11:40:48 -040012958 cqe);
12959 else
12960 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12961 cqe);
James Smart73d91e52011-10-10 21:32:10 -040012962 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040012963 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12964 }
James Smartb84daac2012-08-03 12:35:13 -040012965
12966 /* Track the max number of CQEs processed in 1 EQ */
12967 if (ecount > cq->CQ_max_cqe)
12968 cq->CQ_max_cqe = ecount;
James Smart4f774512009-05-22 14:52:35 -040012969 break;
12970 default:
12971 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12972 "0370 Invalid completion queue type (%d)\n",
12973 cq->type);
12974 return;
12975 }
12976
12977 /* Catch the no cq entry condition, log an error */
12978 if (unlikely(ecount == 0))
12979 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12980 "0371 No entry from the CQ: identifier "
12981 "(x%x), type (%d)\n", cq->queue_id, cq->type);
12982
12983	/* In any case, flush and re-arm the CQ */
12984 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12985
12986 /* wake up worker thread if there are works to be done */
12987 if (workposted)
12988 lpfc_worker_wake_up(phba);
12989}
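/*
 * A worked example of the interim-release cadence in the loops above (a
 * sketch only; the numbers assume the defaults computed later in
 * lpfc_sli4_queue_alloc(), where entry_repost is entry_count >> 3):
 *
 *	entry_count  = 256 CQ entries
 *	entry_repost = 256 >> 3 = 32
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
 *		if (!(++ecount % cq->entry_repost))
 *			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 *
 * Entries are handed back to the hardware every 32 CQEs without re-arming,
 * so a long burst cannot stall on a full CQ; only the final release
 * re-arms the completion interrupt.
 */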
12990
12991/**
12992 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
James Smart2a76a282012-08-03 12:35:54 -040012993 * @phba: Pointer to HBA context object.
12994 * @cq: Pointer to associated CQ
12995 * @wcqe: Pointer to work-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040012996 *
12997 * This routine processes a fast-path work queue completion entry from the
12998 * fast-path event queue for FCP command response completion.
12999 **/
13000static void
James Smart2a76a282012-08-03 12:35:54 -040013001lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040013002 struct lpfc_wcqe_complete *wcqe)
13003{
James Smart2a76a282012-08-03 12:35:54 -040013004 struct lpfc_sli_ring *pring = cq->pring;
James Smart4f774512009-05-22 14:52:35 -040013005 struct lpfc_iocbq *cmdiocbq;
13006 struct lpfc_iocbq irspiocbq;
13007 unsigned long iflags;
13008
James Smart4f774512009-05-22 14:52:35 -040013009 /* Check for response status */
13010 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13011 /* If resource errors reported from HBA, reduce queue
13012 * depth of the SCSI device.
13013 */
James Smarte3d2b802012-08-14 14:25:43 -040013014 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13015 IOSTAT_LOCAL_REJECT)) &&
13016 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13017 IOERR_NO_RESOURCES))
James Smart4f774512009-05-22 14:52:35 -040013018 phba->lpfc_rampdown_queue_depth(phba);
James Smarte3d2b802012-08-14 14:25:43 -040013019
James Smart4f774512009-05-22 14:52:35 -040013020 /* Log the error status */
13021 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13022 "0373 FCP complete error: status=x%x, "
13023 "hw_status=x%x, total_data_specified=%d, "
13024 "parameter=x%x, word3=x%x\n",
13025 bf_get(lpfc_wcqe_c_status, wcqe),
13026 bf_get(lpfc_wcqe_c_hw_status, wcqe),
13027 wcqe->total_data_placed, wcqe->parameter,
13028 wcqe->word3);
13029 }
13030
13031 /* Look up the FCP command IOCB and create pseudo response IOCB */
James Smart7e56aa22012-08-03 12:35:34 -040013032 spin_lock_irqsave(&pring->ring_lock, iflags);
13033 pring->stats.iocb_event++;
James Smart4f774512009-05-22 14:52:35 -040013034 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13035 bf_get(lpfc_wcqe_c_request_tag, wcqe));
James Smart7e56aa22012-08-03 12:35:34 -040013036 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart4f774512009-05-22 14:52:35 -040013037 if (unlikely(!cmdiocbq)) {
13038 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13039 "0374 FCP complete with no corresponding "
13040 "cmdiocb: iotag (%d)\n",
13041 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13042 return;
13043 }
James Smart895427b2017-02-12 13:52:30 -080013044
13045 if (cq->assoc_qp)
13046 cmdiocbq->isr_timestamp =
13047 cq->assoc_qp->isr_timestamp;
13048
13049 if (cmdiocbq->iocb_cmpl == NULL) {
13050 if (cmdiocbq->wqe_cmpl) {
13051 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13052 spin_lock_irqsave(&phba->hbalock, iflags);
13053 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13054 spin_unlock_irqrestore(&phba->hbalock, iflags);
13055 }
13056
13057 /* Pass the cmd_iocb and the wcqe to the upper layer */
13058 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13059 return;
13060 }
James Smart4f774512009-05-22 14:52:35 -040013061 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13062 "0375 FCP cmdiocb not callback function "
13063 "iotag: (%d)\n",
13064 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13065 return;
13066 }
13067
13068 /* Fake the irspiocb and copy necessary response information */
James Smart341af102010-01-26 23:07:37 -050013069 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
James Smart4f774512009-05-22 14:52:35 -040013070
James Smart0f65ff62010-02-26 14:14:23 -050013071 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13072 spin_lock_irqsave(&phba->hbalock, iflags);
13073 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13074 spin_unlock_irqrestore(&phba->hbalock, iflags);
13075 }
13076
James Smart4f774512009-05-22 14:52:35 -040013077 /* Pass the cmd_iocb and the rsp state to the upper layer */
13078 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13079}
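/*
 * Note on the completion dispatch above: a command iocb carries at most
 * one of two callbacks.  When wqe_cmpl is set (NVME path) the raw WCQE is
 * handed straight to it; when iocb_cmpl is set (SCSI path) a temporary
 * response iocb is faked on the stack via lpfc_sli4_iocb_param_transfer()
 * and passed up instead.  In both cases a pending LPFC_DRIVER_ABORTED
 * flag is cleared under the hbalock before the callback runs.
 */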
13080
13081/**
13082 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13083 * @phba: Pointer to HBA context object.
13084 * @cq: Pointer to completion queue.
13085 * @wcqe: Pointer to work-queue completion queue entry.
13086 *
Masahiro Yamada3f8b6fb2017-02-27 14:29:25 -080013087 * This routine handles a fast-path WQ entry consumed event by invoking the
James Smart4f774512009-05-22 14:52:35 -040013088 * proper WQ release routine for the fast-path WQ.
13089 **/
13090static void
13091lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13092 struct lpfc_wcqe_release *wcqe)
13093{
13094 struct lpfc_queue *childwq;
13095 bool wqid_matched = false;
James Smart895427b2017-02-12 13:52:30 -080013096 uint16_t hba_wqid;
James Smart4f774512009-05-22 14:52:35 -040013097
13098 /* Check for fast-path FCP work queue release */
James Smart895427b2017-02-12 13:52:30 -080013099 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
James Smart4f774512009-05-22 14:52:35 -040013100 list_for_each_entry(childwq, &cq->child_list, list) {
James Smart895427b2017-02-12 13:52:30 -080013101 if (childwq->queue_id == hba_wqid) {
James Smart4f774512009-05-22 14:52:35 -040013102 lpfc_sli4_wq_release(childwq,
13103 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13104 wqid_matched = true;
13105 break;
13106 }
13107 }
13108 /* Report warning log message if no match found */
13109 if (wqid_matched != true)
13110 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13111 "2580 Fast-path wqe consume event carries "
James Smart895427b2017-02-12 13:52:30 -080013112 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
James Smart4f774512009-05-22 14:52:35 -040013113}
13114
13115/**
James Smart2d7dbc42017-02-12 13:52:35 -080013116 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13117 * @phba: Pointer to HBA context object.
13118 * @rcqe: Pointer to receive-queue completion queue entry.
13119 *
13120 * This routine processes a receive-queue completion queue entry.
13121 *
13122 * Return: true if work posted to worker thread, otherwise false.
13123 **/
13124static bool
13125lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13126 struct lpfc_rcqe *rcqe)
13127{
13128 bool workposted = false;
13129 struct lpfc_queue *hrq;
13130 struct lpfc_queue *drq;
13131 struct rqb_dmabuf *dma_buf;
13132 struct fc_frame_header *fc_hdr;
13133 uint32_t status, rq_id;
13134 unsigned long iflags;
13135 uint32_t fctl, idx;
13136
13137 if ((phba->nvmet_support == 0) ||
13138 (phba->sli4_hba.nvmet_cqset == NULL))
13139 return workposted;
13140
13141 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13142 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13143 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13144
13145 /* sanity check on queue memory */
13146 if (unlikely(!hrq) || unlikely(!drq))
13147 return workposted;
13148
13149 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13150 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13151 else
13152 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13153
13154 if ((phba->nvmet_support == 0) ||
13155 (rq_id != hrq->queue_id))
13156 return workposted;
13157
13158 status = bf_get(lpfc_rcqe_status, rcqe);
13159 switch (status) {
13160 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13161 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13162 "6126 Receive Frame Truncated!!\n");
13163 hrq->RQ_buf_trunc++;
13164 break;
13165 case FC_STATUS_RQ_SUCCESS:
13166 lpfc_sli4_rq_release(hrq, drq);
13167 spin_lock_irqsave(&phba->hbalock, iflags);
13168 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13169 if (!dma_buf) {
13170 hrq->RQ_no_buf_found++;
13171 spin_unlock_irqrestore(&phba->hbalock, iflags);
13172 goto out;
13173 }
13174 spin_unlock_irqrestore(&phba->hbalock, iflags);
13175 hrq->RQ_rcv_buf++;
13176 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13177
13178 /* Just some basic sanity checks on FCP Command frame */
13179 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13180 fc_hdr->fh_f_ctl[1] << 8 |
13181 fc_hdr->fh_f_ctl[2]);
13182 if (((fctl &
13183 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13184 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13185 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13186 goto drop;
13187
13188 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13189 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
James Smartd613b6a2017-02-12 13:52:37 -080013190 lpfc_nvmet_unsol_fcp_event(
13191 phba, phba->sli4_hba.els_wq->pring, dma_buf,
13192 cq->assoc_qp->isr_timestamp);
James Smart2d7dbc42017-02-12 13:52:35 -080013193 return false;
13194 }
13195drop:
13196 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13197 break;
13198 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13199 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13200 hrq->RQ_no_posted_buf++;
13201 /* Post more buffers if possible */
13202 spin_lock_irqsave(&phba->hbalock, iflags);
13203 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13204 spin_unlock_irqrestore(&phba->hbalock, iflags);
13205 workposted = true;
13206 break;
13207 }
13208out:
13209 return workposted;
13210}
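/*
 * The frame filter applied above, restated as a sketch (for reference
 * only; the F_CTL bit definitions live in <scsi/fc/fc_fs.h>): a received
 * frame reaches the NVMET unsolicited path only when it is a complete,
 * single-sequence FCP command that also transfers sequence initiative.
 *
 *	u32 fctl = fc_hdr->fh_f_ctl[0] << 16 |
 *		   fc_hdr->fh_f_ctl[1] << 8 |
 *		   fc_hdr->fh_f_ctl[2];
 *	u32 need = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
 *
 *	bool deliver = (fctl & need) == need &&
 *		       fc_hdr->fh_seq_cnt == 0 &&
 *		       fc_hdr->fh_type == FC_TYPE_FCP;
 *
 * Anything else is dropped via lpfc_in_buf_free().
 */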
13211
13212/**
James Smart895427b2017-02-12 13:52:30 -080013213 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
James Smart4f774512009-05-22 14:52:35 -040013214 * @cq: Pointer to the completion queue.
13215 * @cqe: Pointer to fast-path completion queue entry.
13216 *
13217 * This routine processes a fast-path work queue completion entry from the
13218 * fast-path event queue for FCP command response completion.
13219 **/
13220static int
James Smart895427b2017-02-12 13:52:30 -080013221lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040013222 struct lpfc_cqe *cqe)
13223{
13224 struct lpfc_wcqe_release wcqe;
13225 bool workposted = false;
13226
13227 /* Copy the work queue CQE and convert endian order if needed */
13228 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13229
13230 /* Check and process for different type of WCQE and dispatch */
13231 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13232 case CQE_CODE_COMPL_WQE:
James Smart895427b2017-02-12 13:52:30 -080013233 case CQE_CODE_NVME_ERSP:
James Smartb84daac2012-08-03 12:35:13 -040013234 cq->CQ_wq++;
James Smart4f774512009-05-22 14:52:35 -040013235 /* Process the WQ complete event */
James Smart98fc5dd2010-06-07 15:24:29 -040013236 phba->last_completion_time = jiffies;
James Smart895427b2017-02-12 13:52:30 -080013237 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13238 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13239 (struct lpfc_wcqe_complete *)&wcqe);
13240 if (cq->subtype == LPFC_NVME_LS)
13241 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
James Smart4f774512009-05-22 14:52:35 -040013242 (struct lpfc_wcqe_complete *)&wcqe);
13243 break;
13244 case CQE_CODE_RELEASE_WQE:
James Smartb84daac2012-08-03 12:35:13 -040013245 cq->CQ_release_wqe++;
James Smart4f774512009-05-22 14:52:35 -040013246 /* Process the WQ release event */
13247 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13248 (struct lpfc_wcqe_release *)&wcqe);
13249 break;
13250 case CQE_CODE_XRI_ABORTED:
James Smartb84daac2012-08-03 12:35:13 -040013251 cq->CQ_xri_aborted++;
James Smart4f774512009-05-22 14:52:35 -040013252 /* Process the WQ XRI abort event */
James Smartbc739052010-08-04 16:11:18 -040013253 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040013254 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13255 (struct sli4_wcqe_xri_aborted *)&wcqe);
13256 break;
James Smart895427b2017-02-12 13:52:30 -080013257 case CQE_CODE_RECEIVE_V1:
13258 case CQE_CODE_RECEIVE:
13259 phba->last_completion_time = jiffies;
James Smart2d7dbc42017-02-12 13:52:35 -080013260 if (cq->subtype == LPFC_NVMET) {
13261 workposted = lpfc_sli4_nvmet_handle_rcqe(
13262 phba, cq, (struct lpfc_rcqe *)&wcqe);
13263 }
James Smart895427b2017-02-12 13:52:30 -080013264 break;
James Smart4f774512009-05-22 14:52:35 -040013265 default:
13266 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -080013267 "0144 Not a valid CQE code: x%x\n",
James Smart4f774512009-05-22 14:52:35 -040013268 bf_get(lpfc_wcqe_c_code, &wcqe));
13269 break;
13270 }
13271 return workposted;
13272}
13273
13274/**
James Smart67d12732012-08-03 12:36:13 -040013275 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
James Smart4f774512009-05-22 14:52:35 -040013276 * @phba: Pointer to HBA context object.
13277 * @eqe: Pointer to fast-path event queue entry.
13278 *
13279 * This routine processes an event queue entry from the fast-path event queue.
13280 * It checks the MajorCode and MinorCode to determine whether this is a
13281 * completion event on a completion queue; if not, an error is logged and the
13282 * routine just returns. Otherwise, it finds the corresponding completion
13283 * queue, processes all the entries on the completion queue, rearms the
13284 * completion queue, and then returns.
13285 **/
13286static void
James Smart67d12732012-08-03 12:36:13 -040013287lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13288 uint32_t qidx)
James Smart4f774512009-05-22 14:52:35 -040013289{
James Smart895427b2017-02-12 13:52:30 -080013290 struct lpfc_queue *cq = NULL;
James Smart4f774512009-05-22 14:52:35 -040013291 struct lpfc_cqe *cqe;
13292 bool workposted = false;
James Smart2d7dbc42017-02-12 13:52:35 -080013293 uint16_t cqid, id;
James Smart4f774512009-05-22 14:52:35 -040013294 int ecount = 0;
13295
James Smartcb5172e2010-03-15 11:25:07 -040013296 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
James Smart4f774512009-05-22 14:52:35 -040013297 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart67d12732012-08-03 12:36:13 -040013298 "0366 Not a valid completion "
James Smart4f774512009-05-22 14:52:35 -040013299 "event: majorcode=x%x, minorcode=x%x\n",
James Smartcb5172e2010-03-15 11:25:07 -040013300 bf_get_le32(lpfc_eqe_major_code, eqe),
13301 bf_get_le32(lpfc_eqe_minor_code, eqe));
James Smart4f774512009-05-22 14:52:35 -040013302 return;
13303 }
13304
James Smart67d12732012-08-03 12:36:13 -040013305 /* Get the reference to the corresponding CQ */
13306 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13307
James Smart2d7dbc42017-02-12 13:52:35 -080013308 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
13309 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
13310 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
13311 /* Process NVMET unsol rcv */
13312 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
13313 goto process_cq;
13314 }
13315 }
13316
James Smart895427b2017-02-12 13:52:30 -080013317 if (phba->sli4_hba.nvme_cq_map &&
13318 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
James Smartf358dd02017-02-12 13:52:34 -080013319 /* Process NVME / NVMET command completion */
James Smart895427b2017-02-12 13:52:30 -080013320 cq = phba->sli4_hba.nvme_cq[qidx];
13321 goto process_cq;
13322 }
13323
13324 if (phba->sli4_hba.fcp_cq_map &&
13325 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13326 /* Process FCP command completion */
13327 cq = phba->sli4_hba.fcp_cq[qidx];
13328 goto process_cq;
13329 }
13330
13331 if (phba->sli4_hba.nvmels_cq &&
13332 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13333 /* Process NVME unsol rcv */
13334 cq = phba->sli4_hba.nvmels_cq;
13335 }
13336
13337 /* Otherwise this is a Slow path event */
13338 if (cq == NULL) {
13339 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
James Smart67d12732012-08-03 12:36:13 -040013340 return;
13341 }
13342
James Smart895427b2017-02-12 13:52:30 -080013343process_cq:
James Smart4f774512009-05-22 14:52:35 -040013344 if (unlikely(cqid != cq->queue_id)) {
13345 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13346 "0368 Miss-matched fast-path completion "
13347 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13348 cqid, cq->queue_id);
13349 return;
13350 }
13351
James Smart895427b2017-02-12 13:52:30 -080013352 /* Save EQ associated with this CQ */
13353 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13354
James Smart4f774512009-05-22 14:52:35 -040013355 /* Process all the entries to the CQ */
13356 while ((cqe = lpfc_sli4_cq_get(cq))) {
James Smart895427b2017-02-12 13:52:30 -080013357 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
James Smart73d91e52011-10-10 21:32:10 -040013358 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040013359 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13360 }
13361
James Smartb84daac2012-08-03 12:35:13 -040013362 /* Track the max number of CQEs processed in 1 EQ */
13363 if (ecount > cq->CQ_max_cqe)
13364 cq->CQ_max_cqe = ecount;
13365
James Smart4f774512009-05-22 14:52:35 -040013366 /* Catch the no cq entry condition */
13367 if (unlikely(ecount == 0))
13368 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13369 "0369 No entry from fast-path completion "
13370 "queue fcpcqid=%d\n", cq->queue_id);
13371
13372	/* In any case, flush and re-arm the CQ */
13373 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13374
13375 /* wake up worker thread if there are works to be done */
13376 if (workposted)
13377 lpfc_worker_wake_up(phba);
13378}
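/*
 * The CQID lookup order used above, summarized (descriptive note):
 *
 *	1. nvmet_cqset[]      cqid inside the contiguous block of
 *	                      cfg_nvmet_mrq IDs starting at
 *	                      nvmet_cqset[0]->queue_id  (NVMET unsol receive)
 *	2. nvme_cq_map[qidx]  per-EQ NVME I/O completion queue
 *	3. fcp_cq_map[qidx]   per-EQ FCP I/O completion queue
 *	4. nvmels_cq          NVME LS completions
 *	5. anything else      treated as slow path; lpfc_sli4_sp_handle_eqe()
 *	                      walks the EQ's child_list for a matching
 *	                      queue_id
 */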
13379
13380static void
13381lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13382{
13383 struct lpfc_eqe *eqe;
13384
13385 /* walk all the EQ entries and drop on the floor */
13386 while ((eqe = lpfc_sli4_eq_get(eq)))
13387 ;
13388
13389 /* Clear and re-arm the EQ */
13390 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13391}
13392
James Smart1ba981f2014-02-20 09:56:45 -050013393
13394/**
13395 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13396 * entry
13397 * @phba: Pointer to HBA context object.
13398 * @eqe: Pointer to fast-path event queue entry.
13399 *
13400 * This routine processes an event queue entry from the Flash Optimized Fabric
13401 * event queue. It checks the MajorCode and MinorCode to determine whether this
13402 * is a completion event on a completion queue; if not, an error is logged and
13403 * the routine just returns. Otherwise, it finds the corresponding completion
13404 * queue, processes all the entries on the completion queue, rearms the
13405 * completion queue, and then returns.
13406 **/
13407static void
13408lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13409{
13410 struct lpfc_queue *cq;
13411 struct lpfc_cqe *cqe;
13412 bool workposted = false;
13413 uint16_t cqid;
13414 int ecount = 0;
13415
13416 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13417 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13418 "9147 Not a valid completion "
13419 "event: majorcode=x%x, minorcode=x%x\n",
13420 bf_get_le32(lpfc_eqe_major_code, eqe),
13421 bf_get_le32(lpfc_eqe_minor_code, eqe));
13422 return;
13423 }
13424
13425 /* Get the reference to the corresponding CQ */
13426 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13427
13428 /* Next check for OAS */
13429 cq = phba->sli4_hba.oas_cq;
13430 if (unlikely(!cq)) {
13431 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13432 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13433 "9148 OAS completion queue "
13434 "does not exist\n");
13435 return;
13436 }
13437
13438 if (unlikely(cqid != cq->queue_id)) {
13439 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13440 "9149 Miss-matched fast-path compl "
13441 "queue id: eqcqid=%d, fcpcqid=%d\n",
13442 cqid, cq->queue_id);
13443 return;
13444 }
13445
13446 /* Process all the entries to the OAS CQ */
13447 while ((cqe = lpfc_sli4_cq_get(cq))) {
James Smart895427b2017-02-12 13:52:30 -080013448 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
James Smart1ba981f2014-02-20 09:56:45 -050013449 if (!(++ecount % cq->entry_repost))
13450 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13451 }
13452
13453 /* Track the max number of CQEs processed in 1 EQ */
13454 if (ecount > cq->CQ_max_cqe)
13455 cq->CQ_max_cqe = ecount;
13456
13457 /* Catch the no cq entry condition */
13458 if (unlikely(ecount == 0))
13459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13460 "9153 No entry from fast-path completion "
13461 "queue fcpcqid=%d\n", cq->queue_id);
13462
13463	/* In any case, flush and re-arm the CQ */
13464 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13465
13466 /* wake up worker thread if there are works to be done */
13467 if (workposted)
13468 lpfc_worker_wake_up(phba);
13469}
13470
13471/**
13472 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13473 * @irq: Interrupt number.
13474 * @dev_id: The device context pointer.
13475 *
13476 * This function is directly called from the PCI layer as an interrupt
13477 * service routine when device with SLI-4 interface spec is enabled with
13478 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13479 * IOCB ring event in the HBA. However, when the device is enabled with either
13480 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13481 * device-level interrupt handler. When the PCI slot is in error recovery
13482 * or the HBA is undergoing initialization, the interrupt handler will not
13483 * process the interrupt. The Flash Optimized Fabric ring events are handled
13484 * in the interrupt context. This function is called without any lock held.
13485 * It gets the hbalock to access and update SLI data structures. Note that
13486 * the EQ-to-CQ mapping is one-to-one, such that the EQ index is equal to
13487 * the CQ index.
13488 *
13489 * This function returns IRQ_HANDLED when interrupt is handled else it
13490 * returns IRQ_NONE.
13491 **/
13492irqreturn_t
13493lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13494{
13495 struct lpfc_hba *phba;
James Smart895427b2017-02-12 13:52:30 -080013496 struct lpfc_hba_eq_hdl *hba_eq_hdl;
James Smart1ba981f2014-02-20 09:56:45 -050013497 struct lpfc_queue *eq;
13498 struct lpfc_eqe *eqe;
13499 unsigned long iflag;
13500 int ecount = 0;
James Smart1ba981f2014-02-20 09:56:45 -050013501
13502 /* Get the driver's phba structure from the dev_id */
James Smart895427b2017-02-12 13:52:30 -080013503 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13504 phba = hba_eq_hdl->phba;
James Smart1ba981f2014-02-20 09:56:45 -050013505
13506 if (unlikely(!phba))
13507 return IRQ_NONE;
13508
13509 /* Get to the EQ struct associated with this vector */
13510 eq = phba->sli4_hba.fof_eq;
13511 if (unlikely(!eq))
13512 return IRQ_NONE;
13513
13514 /* Check device state for handling interrupt */
13515 if (unlikely(lpfc_intr_state_check(phba))) {
13516 eq->EQ_badstate++;
13517 /* Check again for link_state with lock held */
13518 spin_lock_irqsave(&phba->hbalock, iflag);
13519 if (phba->link_state < LPFC_LINK_DOWN)
13520 /* Flush, clear interrupt, and rearm the EQ */
13521 lpfc_sli4_eq_flush(phba, eq);
13522 spin_unlock_irqrestore(&phba->hbalock, iflag);
13523 return IRQ_NONE;
13524 }
13525
13526 /*
13527	 * Process all the events on the FCP fast-path EQ
13528 */
13529 while ((eqe = lpfc_sli4_eq_get(eq))) {
13530 lpfc_sli4_fof_handle_eqe(phba, eqe);
13531 if (!(++ecount % eq->entry_repost))
13532 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
13533 eq->EQ_processed++;
13534 }
13535
13536 /* Track the max number of EQEs processed in 1 intr */
13537 if (ecount > eq->EQ_max_eqe)
13538 eq->EQ_max_eqe = ecount;
13539
13540
13541 if (unlikely(ecount == 0)) {
13542 eq->EQ_no_entry++;
13543
13544 if (phba->intr_type == MSIX)
13545 /* MSI-X treated interrupt served as no EQ share INT */
13546 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13547 "9145 MSI-X interrupt with no EQE\n");
13548 else {
13549 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13550 "9146 ISR interrupt with no EQE\n");
13551 /* Non MSI-X treated on interrupt as EQ share INT */
13552 return IRQ_NONE;
13553 }
13554 }
13555 /* Always clear and re-arm the fast-path EQ */
13556 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13557 return IRQ_HANDLED;
13558}
13559
James Smart4f774512009-05-22 14:52:35 -040013560/**
James Smart67d12732012-08-03 12:36:13 -040013561 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
James Smart4f774512009-05-22 14:52:35 -040013562 * @irq: Interrupt number.
13563 * @dev_id: The device context pointer.
13564 *
13565 * This function is directly called from the PCI layer as an interrupt
13566 * service routine when device with SLI-4 interface spec is enabled with
13567 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13568 * ring event in the HBA. However, when the device is enabled with either
13569 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13570 * device-level interrupt handler. When the PCI slot is in error recovery
13571 * or the HBA is undergoing initialization, the interrupt handler will not
13572 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13573 * the interrupt context. This function is called without any lock held.
13574 * It gets the hbalock to access and update SLI data structures. Note that
13575 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
13576 * equal to the FCP CQ index.
13577 *
James Smart67d12732012-08-03 12:36:13 -040013578 * The link attention and ELS ring attention events are handled
13579 * by the worker thread. The interrupt handler signals the worker thread
13580 * and returns for these events. This function is called without any lock
13581 * held. It gets the hbalock to access and update SLI data structures.
13582 *
James Smart4f774512009-05-22 14:52:35 -040013583 * This function returns IRQ_HANDLED when interrupt is handled else it
13584 * returns IRQ_NONE.
13585 **/
13586irqreturn_t
James Smart67d12732012-08-03 12:36:13 -040013587lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
James Smart4f774512009-05-22 14:52:35 -040013588{
13589 struct lpfc_hba *phba;
James Smart895427b2017-02-12 13:52:30 -080013590 struct lpfc_hba_eq_hdl *hba_eq_hdl;
James Smart4f774512009-05-22 14:52:35 -040013591 struct lpfc_queue *fpeq;
13592 struct lpfc_eqe *eqe;
13593 unsigned long iflag;
13594 int ecount = 0;
James Smart895427b2017-02-12 13:52:30 -080013595 int hba_eqidx;
James Smart4f774512009-05-22 14:52:35 -040013596
13597 /* Get the driver's phba structure from the dev_id */
James Smart895427b2017-02-12 13:52:30 -080013598 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13599 phba = hba_eq_hdl->phba;
13600 hba_eqidx = hba_eq_hdl->idx;
James Smart4f774512009-05-22 14:52:35 -040013601
13602 if (unlikely(!phba))
13603 return IRQ_NONE;
James Smart67d12732012-08-03 12:36:13 -040013604 if (unlikely(!phba->sli4_hba.hba_eq))
James Smart5350d872011-10-10 21:33:49 -040013605 return IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040013606
13607 /* Get to the EQ struct associated with this vector */
James Smart895427b2017-02-12 13:52:30 -080013608 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
James Smart2e90f4b2011-12-13 13:22:37 -050013609 if (unlikely(!fpeq))
13610 return IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040013611
James Smartbd2cdd52017-02-12 13:52:33 -080013612#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13613 if (phba->ktime_on)
13614 fpeq->isr_timestamp = ktime_get_ns();
13615#endif
13616
James Smartba20c852012-08-03 12:36:52 -040013617 if (lpfc_fcp_look_ahead) {
James Smart895427b2017-02-12 13:52:30 -080013618 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
James Smartba20c852012-08-03 12:36:52 -040013619 lpfc_sli4_eq_clr_intr(fpeq);
13620 else {
James Smart895427b2017-02-12 13:52:30 -080013621 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smartba20c852012-08-03 12:36:52 -040013622 return IRQ_NONE;
13623 }
13624 }
13625
James Smart4f774512009-05-22 14:52:35 -040013626 /* Check device state for handling interrupt */
13627 if (unlikely(lpfc_intr_state_check(phba))) {
James Smartb84daac2012-08-03 12:35:13 -040013628 fpeq->EQ_badstate++;
James Smart4f774512009-05-22 14:52:35 -040013629 /* Check again for link_state with lock held */
13630 spin_lock_irqsave(&phba->hbalock, iflag);
13631 if (phba->link_state < LPFC_LINK_DOWN)
13632 /* Flush, clear interrupt, and rearm the EQ */
13633 lpfc_sli4_eq_flush(phba, fpeq);
13634 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smartba20c852012-08-03 12:36:52 -040013635 if (lpfc_fcp_look_ahead)
James Smart895427b2017-02-12 13:52:30 -080013636 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smart4f774512009-05-22 14:52:35 -040013637 return IRQ_NONE;
13638 }
13639
13640 /*
13641	 * Process all the events on the FCP fast-path EQ
13642 */
13643 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
James Smarteb016562014-09-03 12:58:06 -040013644 if (eqe == NULL)
13645 break;
13646
James Smart895427b2017-02-12 13:52:30 -080013647 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
James Smart73d91e52011-10-10 21:32:10 -040013648 if (!(++ecount % fpeq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040013649 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
James Smartb84daac2012-08-03 12:35:13 -040013650 fpeq->EQ_processed++;
James Smart4f774512009-05-22 14:52:35 -040013651 }
13652
James Smartb84daac2012-08-03 12:35:13 -040013653 /* Track the max number of EQEs processed in 1 intr */
13654 if (ecount > fpeq->EQ_max_eqe)
13655 fpeq->EQ_max_eqe = ecount;
13656
James Smart4f774512009-05-22 14:52:35 -040013657 /* Always clear and re-arm the fast-path EQ */
13658 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
13659
13660 if (unlikely(ecount == 0)) {
James Smartb84daac2012-08-03 12:35:13 -040013661 fpeq->EQ_no_entry++;
James Smartba20c852012-08-03 12:36:52 -040013662
13663 if (lpfc_fcp_look_ahead) {
James Smart895427b2017-02-12 13:52:30 -080013664 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smartba20c852012-08-03 12:36:52 -040013665 return IRQ_NONE;
13666 }
13667
James Smart4f774512009-05-22 14:52:35 -040013668 if (phba->intr_type == MSIX)
13669 /* MSI-X treated interrupt served as no EQ share INT */
13670 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13671 "0358 MSI-X interrupt with no EQE\n");
13672 else
13673 /* Non MSI-X treated on interrupt as EQ share INT */
13674 return IRQ_NONE;
13675 }
13676
James Smartba20c852012-08-03 12:36:52 -040013677 if (lpfc_fcp_look_ahead)
James Smart895427b2017-02-12 13:52:30 -080013678 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13679
James Smart4f774512009-05-22 14:52:35 -040013680 return IRQ_HANDLED;
13681} /* lpfc_sli4_fp_intr_handler */
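/*
 * A minimal sketch of the lpfc_fcp_look_ahead gating used above
 * (illustrative only): the per-EQ hba_eq_in_use atomic acts as a 1-deep
 * claim so that two contexts do not process the same EQ concurrently.
 * Every early-return path gives the claim back.
 *
 *	if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
 *		lpfc_sli4_eq_clr_intr(fpeq);	(claimed: mask and poll)
 *	else {
 *		atomic_inc(&hba_eq_hdl->hba_eq_in_use);
 *		return IRQ_NONE;		(someone else owns the EQ)
 *	}
 *	... process EQEs ...
 *	atomic_inc(&hba_eq_hdl->hba_eq_in_use);	(release the claim)
 */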
13682
13683/**
13684 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
13685 * @irq: Interrupt number.
13686 * @dev_id: The device context pointer.
13687 *
13688 * This function is the device-level interrupt handler to device with SLI-4
13689 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
13690 * interrupt mode is enabled and there is an event in the HBA which requires
13691 * driver attention. This function invokes the slow-path interrupt attention
13692 * handling function and fast-path interrupt attention handling function in
13693 * turn to process the relevant HBA attention events. This function is called
13694 * without any lock held. It gets the hbalock to access and update SLI data
13695 * structures.
13696 *
13697 * This function returns IRQ_HANDLED when interrupt is handled, else it
13698 * returns IRQ_NONE.
13699 **/
13700irqreturn_t
13701lpfc_sli4_intr_handler(int irq, void *dev_id)
13702{
13703 struct lpfc_hba *phba;
James Smart67d12732012-08-03 12:36:13 -040013704 irqreturn_t hba_irq_rc;
13705 bool hba_handled = false;
James Smart895427b2017-02-12 13:52:30 -080013706 int qidx;
James Smart4f774512009-05-22 14:52:35 -040013707
13708 /* Get the driver's phba structure from the dev_id */
13709 phba = (struct lpfc_hba *)dev_id;
13710
13711 if (unlikely(!phba))
13712 return IRQ_NONE;
13713
13714 /*
James Smart4f774512009-05-22 14:52:35 -040013715 * Invoke fast-path host attention interrupt handling as appropriate.
13716 */
James Smart895427b2017-02-12 13:52:30 -080013717 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
James Smart67d12732012-08-03 12:36:13 -040013718 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
James Smart895427b2017-02-12 13:52:30 -080013719 &phba->sli4_hba.hba_eq_hdl[qidx]);
James Smart67d12732012-08-03 12:36:13 -040013720 if (hba_irq_rc == IRQ_HANDLED)
13721 hba_handled |= true;
James Smart4f774512009-05-22 14:52:35 -040013722 }
13723
James Smart1ba981f2014-02-20 09:56:45 -050013724 if (phba->cfg_fof) {
13725 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
James Smart895427b2017-02-12 13:52:30 -080013726 &phba->sli4_hba.hba_eq_hdl[qidx]);
James Smart1ba981f2014-02-20 09:56:45 -050013727 if (hba_irq_rc == IRQ_HANDLED)
13728 hba_handled |= true;
13729 }
13730
James Smart67d12732012-08-03 12:36:13 -040013731 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040013732} /* lpfc_sli4_intr_handler */
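/*
 * Note for the MSI / Pin-IRQ case handled above: every I/O channel EQ is
 * polled in turn through its hba_eq_hdl[] entry, and after the loop qidx
 * equals phba->io_channel_irqs.  The Flash Optimized Fabric path
 * (cfg_fof) therefore reuses the EQ handle that sits just past the
 * regular I/O channel entries.
 */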
13733
13734/**
13735 * lpfc_sli4_queue_free - free a queue structure and associated memory
13736 * @queue: The queue structure to free.
13737 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -040013738 * This function frees a queue structure and the DMAable memory used for
James Smart4f774512009-05-22 14:52:35 -040013739 * the host resident queue. This function must be called after destroying the
13740 * queue on the HBA.
13741 **/
13742void
13743lpfc_sli4_queue_free(struct lpfc_queue *queue)
13744{
13745 struct lpfc_dmabuf *dmabuf;
13746
13747 if (!queue)
13748 return;
13749
13750 while (!list_empty(&queue->page_list)) {
13751 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
13752 list);
James Smart49198b32010-04-06 15:04:33 -040013753 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
James Smart4f774512009-05-22 14:52:35 -040013754 dmabuf->virt, dmabuf->phys);
13755 kfree(dmabuf);
13756 }
James Smart895427b2017-02-12 13:52:30 -080013757 if (queue->rqbp) {
13758 lpfc_free_rq_buffer(queue->phba, queue);
13759 kfree(queue->rqbp);
13760 }
James Smartd1f525a2017-04-21 16:04:55 -070013761
13762 if (!list_empty(&queue->wq_list))
13763 list_del(&queue->wq_list);
13764
James Smart4f774512009-05-22 14:52:35 -040013765 kfree(queue);
13766 return;
13767}
13768
13769/**
13770 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
13771 * @phba: The HBA that this queue is being created on.
13772 * @entry_size: The size of each queue entry for this queue.
13773 * @entry_count: The number of entries that this queue will handle.
13774 *
13775 * This function allocates a queue structure and the DMAable memory used for
13776 * the host resident queue. This function must be called before creating the
13777 * queue on the HBA.
13778 **/
13779struct lpfc_queue *
13780lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13781 uint32_t entry_count)
13782{
13783 struct lpfc_queue *queue;
13784 struct lpfc_dmabuf *dmabuf;
13785 int x, total_qe_count;
13786 void *dma_pointer;
James Smartcb5172e2010-03-15 11:25:07 -040013787 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart4f774512009-05-22 14:52:35 -040013788
James Smartcb5172e2010-03-15 11:25:07 -040013789 if (!phba->sli4_hba.pc_sli4_params.supported)
13790 hw_page_size = SLI4_PAGE_SIZE;
13791
James Smart4f774512009-05-22 14:52:35 -040013792 queue = kzalloc(sizeof(struct lpfc_queue) +
13793 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13794 if (!queue)
13795 return NULL;
James Smartcb5172e2010-03-15 11:25:07 -040013796 queue->page_count = (ALIGN(entry_size * entry_count,
13797 hw_page_size))/hw_page_size;
James Smart895427b2017-02-12 13:52:30 -080013798
13799 /* If needed, Adjust page count to match the max the adapter supports */
13800 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
13801 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
13802
James Smart4f774512009-05-22 14:52:35 -040013803 INIT_LIST_HEAD(&queue->list);
James Smart895427b2017-02-12 13:52:30 -080013804 INIT_LIST_HEAD(&queue->wq_list);
James Smart4f774512009-05-22 14:52:35 -040013805 INIT_LIST_HEAD(&queue->page_list);
13806 INIT_LIST_HEAD(&queue->child_list);
13807 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13808 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13809 if (!dmabuf)
13810 goto out_fail;
Joe Perches1aee3832014-09-03 12:56:12 -040013811 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13812 hw_page_size, &dmabuf->phys,
13813 GFP_KERNEL);
James Smart4f774512009-05-22 14:52:35 -040013814 if (!dmabuf->virt) {
13815 kfree(dmabuf);
13816 goto out_fail;
13817 }
13818 dmabuf->buffer_tag = x;
13819 list_add_tail(&dmabuf->list, &queue->page_list);
13820 /* initialize queue's entry array */
13821 dma_pointer = dmabuf->virt;
13822 for (; total_qe_count < entry_count &&
James Smartcb5172e2010-03-15 11:25:07 -040013823 dma_pointer < (hw_page_size + dmabuf->virt);
James Smart4f774512009-05-22 14:52:35 -040013824 total_qe_count++, dma_pointer += entry_size) {
13825 queue->qe[total_qe_count].address = dma_pointer;
13826 }
13827 }
13828 queue->entry_size = entry_size;
13829 queue->entry_count = entry_count;
James Smart73d91e52011-10-10 21:32:10 -040013830
13831 /*
13832 * entry_repost is calculated based on the number of entries in the
13833 * queue. This works out except for RQs. If buffers are NOT initially
13834 * posted for every RQE, entry_repost should be adjusted accordingly.
13835 */
13836 queue->entry_repost = (entry_count >> 3);
13837 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13838 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
James Smart4f774512009-05-22 14:52:35 -040013839 queue->phba = phba;
13840
13841 return queue;
13842out_fail:
13843 lpfc_sli4_queue_free(queue);
13844 return NULL;
13845}
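
/*
 * Worked example for the sizing above (values assumed for illustration):
 * with a 64-byte entry size, a 1024-entry count and a 4096-byte
 * hw_page_size, ALIGN(64 * 1024, 4096) / 4096 = 16, so sixteen DMA pages
 * are allocated and threaded onto queue->page_list.  A minimal caller is:
 *
 *     struct lpfc_queue *q;
 *
 *     q = lpfc_sli4_queue_alloc(phba, 64, 1024);
 *     if (!q)
 *             return -ENOMEM;
 */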
13846
13847/**
James Smart962bc512013-01-03 15:44:00 -050013848 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13849 * @phba: HBA structure that indicates port to create a queue on.
13850 * @pci_barset: PCI BAR set flag.
13851 *
 13852 * This function returns the host (kernel virtual) address to which the
 13853 * specified PCI BAR set has been iomapped. The returned address may be
 13854 * NULL if the BAR set is unknown or has not been mapped.
13855 */
13856static void __iomem *
13857lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13858{
James Smart962bc512013-01-03 15:44:00 -050013859 if (!phba->pcidev)
13860 return NULL;
James Smart962bc512013-01-03 15:44:00 -050013861
13862 switch (pci_barset) {
13863 case WQ_PCI_BAR_0_AND_1:
James Smart962bc512013-01-03 15:44:00 -050013864 return phba->pci_bar0_memmap_p;
13865 case WQ_PCI_BAR_2_AND_3:
James Smart962bc512013-01-03 15:44:00 -050013866 return phba->pci_bar2_memmap_p;
13867 case WQ_PCI_BAR_4_AND_5:
James Smart962bc512013-01-03 15:44:00 -050013868 return phba->pci_bar4_memmap_p;
13869 default:
13870 break;
13871 }
13872 return NULL;
13873}
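
/*
 * Usage sketch: the WQ/RQ create routines later in this file call this
 * helper with the BAR set reported in the mailbox response, roughly:
 *
 *     bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, WQ_PCI_BAR_2_AND_3);
 *     if (!bar_memmap_p)
 *             return -ENOMEM;
 *
 * The constant shown is one of the WQ_PCI_BAR_* values handled above; real
 * callers take the value from lpfc_mbx_wq_create_bar_set or
 * lpfc_mbx_rq_create_bar_set rather than hard coding it.
 */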
13874
13875/**
James Smart895427b2017-02-12 13:52:30 -080013876 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
James Smart173edbb2012-06-12 13:54:50 -040013877 * @phba: HBA structure that indicates port to create a queue on.
13878 * @startq: The starting FCP EQ to modify
13879 *
 13880 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
James Smart43140ca2017-03-04 09:30:34 -080013881 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
13882 * updated in one mailbox command.
James Smart173edbb2012-06-12 13:54:50 -040013883 *
13884 * The @phba struct is used to send mailbox command to HBA. The @startq
13885 * is used to get the starting FCP EQ to change.
 13886 * The mailbox command is issued in polled mode, so this function waits
 13887 * for the command to finish before returning.
13888 *
13889 * On success this function will return a zero. If unable to allocate enough
 13890 * memory this function will return -ENOMEM. If the mailbox command
13891 * fails this function will return -ENXIO.
13892 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040013893int
James Smart895427b2017-02-12 13:52:30 -080013894lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
James Smart173edbb2012-06-12 13:54:50 -040013895{
13896 struct lpfc_mbx_modify_eq_delay *eq_delay;
13897 LPFC_MBOXQ_t *mbox;
13898 struct lpfc_queue *eq;
13899 int cnt, rc, length, status = 0;
13900 uint32_t shdr_status, shdr_add_status;
James Smartee020062012-09-29 11:28:52 -040013901 uint32_t result;
James Smart895427b2017-02-12 13:52:30 -080013902 int qidx;
James Smart173edbb2012-06-12 13:54:50 -040013903 union lpfc_sli4_cfg_shdr *shdr;
13904 uint16_t dmult;
13905
James Smart895427b2017-02-12 13:52:30 -080013906 if (startq >= phba->io_channel_irqs)
James Smart173edbb2012-06-12 13:54:50 -040013907 return 0;
13908
13909 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13910 if (!mbox)
13911 return -ENOMEM;
13912 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
13913 sizeof(struct lpfc_sli4_cfg_mhdr));
13914 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13915 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
13916 length, LPFC_SLI4_MBX_EMBED);
13917 eq_delay = &mbox->u.mqe.un.eq_delay;
13918
 13919	/* Calculate delay multiplier from maximum interrupts per second */
James Smart895427b2017-02-12 13:52:30 -080013920 result = phba->cfg_fcp_imax / phba->io_channel_irqs;
13921 if (result > LPFC_DMULT_CONST || result == 0)
James Smartee020062012-09-29 11:28:52 -040013922 dmult = 0;
13923 else
13924 dmult = LPFC_DMULT_CONST/result - 1;
James Smart173edbb2012-06-12 13:54:50 -040013925
13926 cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080013927 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
13928 eq = phba->sli4_hba.hba_eq[qidx];
James Smart173edbb2012-06-12 13:54:50 -040013929 if (!eq)
13930 continue;
13931 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
13932 eq_delay->u.request.eq[cnt].phase = 0;
13933 eq_delay->u.request.eq[cnt].delay_multi = dmult;
13934 cnt++;
James Smart43140ca2017-03-04 09:30:34 -080013935 if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
James Smart173edbb2012-06-12 13:54:50 -040013936 break;
13937 }
13938 eq_delay->u.request.num_eq = cnt;
13939
13940 mbox->vport = phba->pport;
13941 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13942 mbox->context1 = NULL;
13943 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13944 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
13945 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13946 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13947 if (shdr_status || shdr_add_status || rc) {
13948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13949 "2512 MODIFY_EQ_DELAY mailbox failed with "
13950 "status x%x add_status x%x, mbx status x%x\n",
13951 shdr_status, shdr_add_status, rc);
13952 status = -ENXIO;
13953 }
13954 mempool_free(mbox, phba->mbox_mem_pool);
13955 return status;
13956}
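
/*
 * Worked example (assumed values): with phba->cfg_fcp_imax = 80000 and
 * phba->io_channel_irqs = 8, result = 10000 interrupts per second per EQ
 * and, provided 10000 does not exceed LPFC_DMULT_CONST, each EQ is
 * programmed with a multiplier of LPFC_DMULT_CONST / 10000 - 1.  Because a
 * single mailbox updates at most LPFC_MAX_EQ_DELAY_EQID_CNT EQs, a caller
 * would typically step through the EQs in chunks:
 *
 *     for (i = 0; i < phba->io_channel_irqs;
 *          i += LPFC_MAX_EQ_DELAY_EQID_CNT)
 *             lpfc_modify_hba_eq_delay(phba, i);
 */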
13957
13958/**
James Smart4f774512009-05-22 14:52:35 -040013959 * lpfc_eq_create - Create an Event Queue on the HBA
13960 * @phba: HBA structure that indicates port to create a queue on.
13961 * @eq: The queue structure to use to create the event queue.
13962 * @imax: The maximum interrupt per second limit.
13963 *
13964 * This function creates an event queue, as detailed in @eq, on a port,
13965 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
13966 *
13967 * The @phba struct is used to send mailbox command to HBA. The @eq struct
13968 * is used to get the entry count and entry size that are necessary to
13969 * determine the number of pages to allocate and use for this queue. This
13970 * function will send the EQ_CREATE mailbox command to the HBA to setup the
13971 * event queue. This function is asynchronous and will wait for the mailbox
13972 * command to finish before continuing.
13973 *
13974 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040013975 * memory this function will return -ENOMEM. If the queue create mailbox command
13976 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040013977 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040013978int
James Smartee020062012-09-29 11:28:52 -040013979lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
James Smart4f774512009-05-22 14:52:35 -040013980{
13981 struct lpfc_mbx_eq_create *eq_create;
13982 LPFC_MBOXQ_t *mbox;
13983 int rc, length, status = 0;
13984 struct lpfc_dmabuf *dmabuf;
13985 uint32_t shdr_status, shdr_add_status;
13986 union lpfc_sli4_cfg_shdr *shdr;
13987 uint16_t dmult;
James Smart49198b32010-04-06 15:04:33 -040013988 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13989
James Smart2e90f4b2011-12-13 13:22:37 -050013990 /* sanity check on queue memory */
13991 if (!eq)
13992 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040013993 if (!phba->sli4_hba.pc_sli4_params.supported)
13994 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040013995
13996 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13997 if (!mbox)
13998 return -ENOMEM;
13999 length = (sizeof(struct lpfc_mbx_eq_create) -
14000 sizeof(struct lpfc_sli4_cfg_mhdr));
14001 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14002 LPFC_MBOX_OPCODE_EQ_CREATE,
14003 length, LPFC_SLI4_MBX_EMBED);
14004 eq_create = &mbox->u.mqe.un.eq_create;
14005 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14006 eq->page_count);
14007 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14008 LPFC_EQE_SIZE);
14009 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
James Smart2c9c5a02015-04-07 15:07:15 -040014010 /* don't setup delay multiplier using EQ_CREATE */
14011 dmult = 0;
James Smart4f774512009-05-22 14:52:35 -040014012 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14013 dmult);
14014 switch (eq->entry_count) {
14015 default:
14016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14017 "0360 Unsupported EQ count. (%d)\n",
14018 eq->entry_count);
14019 if (eq->entry_count < 256)
14020 return -EINVAL;
14021 /* otherwise default to smallest count (drop through) */
14022 case 256:
14023 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14024 LPFC_EQ_CNT_256);
14025 break;
14026 case 512:
14027 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14028 LPFC_EQ_CNT_512);
14029 break;
14030 case 1024:
14031 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14032 LPFC_EQ_CNT_1024);
14033 break;
14034 case 2048:
14035 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14036 LPFC_EQ_CNT_2048);
14037 break;
14038 case 4096:
14039 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14040 LPFC_EQ_CNT_4096);
14041 break;
14042 }
14043 list_for_each_entry(dmabuf, &eq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014044 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040014045 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14046 putPaddrLow(dmabuf->phys);
14047 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14048 putPaddrHigh(dmabuf->phys);
14049 }
14050 mbox->vport = phba->pport;
14051 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14052 mbox->context1 = NULL;
14053 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14054 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14055 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14056 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14057 if (shdr_status || shdr_add_status || rc) {
14058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14059 "2500 EQ_CREATE mailbox failed with "
14060 "status x%x add_status x%x, mbx status x%x\n",
14061 shdr_status, shdr_add_status, rc);
14062 status = -ENXIO;
14063 }
14064 eq->type = LPFC_EQ;
14065 eq->subtype = LPFC_NONE;
14066 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14067 if (eq->queue_id == 0xFFFF)
14068 status = -ENXIO;
14069 eq->host_index = 0;
14070 eq->hba_index = 0;
14071
James Smart8fa38512009-07-19 10:01:03 -040014072 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014073 return status;
14074}
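
/*
 * Usage sketch (illustrative): create an event queue that was sized with
 * lpfc_sli4_queue_alloc().  The 1024-entry count matches one of the
 * LPFC_EQ_CNT_* encodings handled above; the 4-byte entry size and the use
 * of cfg_fcp_imax as @imax are assumptions for the example.
 *
 *     struct lpfc_queue *eq;
 *     int rc;
 *
 *     eq = lpfc_sli4_queue_alloc(phba, 4, 1024);
 *     if (!eq)
 *             return -ENOMEM;
 *     rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 *     if (rc)
 *             lpfc_sli4_queue_free(eq);
 */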
14075
14076/**
14077 * lpfc_cq_create - Create a Completion Queue on the HBA
14078 * @phba: HBA structure that indicates port to create a queue on.
14079 * @cq: The queue structure to use to create the completion queue.
14080 * @eq: The event queue to bind this completion queue to.
14081 *
 14082 * This function creates a completion queue, as detailed in @cq, on a port,
14083 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14084 *
14085 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14086 * is used to get the entry count and entry size that are necessary to
14087 * determine the number of pages to allocate and use for this queue. The @eq
14088 * is used to indicate which event queue to bind this completion queue to. This
14089 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14090 * completion queue. This function is asynchronous and will wait for the mailbox
14091 * command to finish before continuing.
14092 *
14093 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040014094 * memory this function will return -ENOMEM. If the queue create mailbox command
14095 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014096 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014097int
James Smart4f774512009-05-22 14:52:35 -040014098lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14099 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14100{
14101 struct lpfc_mbx_cq_create *cq_create;
14102 struct lpfc_dmabuf *dmabuf;
14103 LPFC_MBOXQ_t *mbox;
14104 int rc, length, status = 0;
14105 uint32_t shdr_status, shdr_add_status;
14106 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040014107 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14108
James Smart2e90f4b2011-12-13 13:22:37 -050014109 /* sanity check on queue memory */
14110 if (!cq || !eq)
14111 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040014112 if (!phba->sli4_hba.pc_sli4_params.supported)
14113 hw_page_size = SLI4_PAGE_SIZE;
14114
James Smart4f774512009-05-22 14:52:35 -040014115 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14116 if (!mbox)
14117 return -ENOMEM;
14118 length = (sizeof(struct lpfc_mbx_cq_create) -
14119 sizeof(struct lpfc_sli4_cfg_mhdr));
14120 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14121 LPFC_MBOX_OPCODE_CQ_CREATE,
14122 length, LPFC_SLI4_MBX_EMBED);
14123 cq_create = &mbox->u.mqe.un.cq_create;
James Smart5a6f1332011-03-11 16:05:35 -050014124 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040014125 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14126 cq->page_count);
14127 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14128 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
James Smart5a6f1332011-03-11 16:05:35 -050014129 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14130 phba->sli4_hba.pc_sli4_params.cqv);
14131 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
James Smartc31098c2011-04-16 11:03:33 -040014132 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
14133 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
James Smart5a6f1332011-03-11 16:05:35 -050014134 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14135 eq->queue_id);
14136 } else {
14137 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14138 eq->queue_id);
14139 }
James Smart4f774512009-05-22 14:52:35 -040014140 switch (cq->entry_count) {
14141 default:
14142 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart2ea259e2017-02-12 13:52:27 -080014143 "0361 Unsupported CQ count: "
14144 "entry cnt %d sz %d pg cnt %d repost %d\n",
14145 cq->entry_count, cq->entry_size,
14146 cq->page_count, cq->entry_repost);
James Smart4f4c1862012-06-12 13:54:02 -040014147 if (cq->entry_count < 256) {
14148 status = -EINVAL;
14149 goto out;
14150 }
James Smart4f774512009-05-22 14:52:35 -040014151 /* otherwise default to smallest count (drop through) */
14152 case 256:
14153 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14154 LPFC_CQ_CNT_256);
14155 break;
14156 case 512:
14157 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14158 LPFC_CQ_CNT_512);
14159 break;
14160 case 1024:
14161 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14162 LPFC_CQ_CNT_1024);
14163 break;
14164 }
14165 list_for_each_entry(dmabuf, &cq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014166 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040014167 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14168 putPaddrLow(dmabuf->phys);
14169 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14170 putPaddrHigh(dmabuf->phys);
14171 }
14172 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14173
14174 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040014175 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14176 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14177 if (shdr_status || shdr_add_status || rc) {
14178 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14179 "2501 CQ_CREATE mailbox failed with "
14180 "status x%x add_status x%x, mbx status x%x\n",
14181 shdr_status, shdr_add_status, rc);
14182 status = -ENXIO;
14183 goto out;
14184 }
14185 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14186 if (cq->queue_id == 0xFFFF) {
14187 status = -ENXIO;
14188 goto out;
14189 }
14190 /* link the cq onto the parent eq child list */
14191 list_add_tail(&cq->list, &eq->child_list);
14192 /* Set up completion queue's type and subtype */
14193 cq->type = type;
14194 cq->subtype = subtype;
14195 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
James Smart2a622bf2011-02-16 12:40:06 -050014196 cq->assoc_qid = eq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040014197 cq->host_index = 0;
14198 cq->hba_index = 0;
James Smart4f774512009-05-22 14:52:35 -040014199
James Smart8fa38512009-07-19 10:01:03 -040014200out:
14201 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014202 return status;
14203}
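
/*
 * Usage sketch (illustrative): bind a freshly allocated completion queue to
 * an existing event queue.  The LPFC_WCQ/LPFC_FCP type and subtype mirror
 * what the driver uses for FCP work completion queues elsewhere; treat them
 * and the 16-byte/1024-entry sizing as assumptions here.
 *
 *     struct lpfc_queue *cq;
 *     int rc;
 *
 *     cq = lpfc_sli4_queue_alloc(phba, 16, 1024);
 *     if (!cq)
 *             return -ENOMEM;
 *     rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *     if (rc)
 *             lpfc_sli4_queue_free(cq);
 */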
14204
14205/**
James Smart2d7dbc42017-02-12 13:52:35 -080014206 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14207 * @phba: HBA structure that indicates port to create a queue on.
14208 * @cqp: The queue structure array to use to create the completion queues.
14209 * @eqp: The event queue array to bind these completion queues to.
14210 *
 14211 * This function creates a set of completion queues to support MRQ, as
 14212 * detailed in @cqp, on a port described by @phba, by sending a
 14213 * CREATE_CQ_SET mailbox command to the HBA.
14214 *
 14215 * The @phba struct is used to send the mailbox command to the HBA. Each @cqp
 14216 * entry is used to get the entry count and entry size that are necessary to
 14217 * determine the number of pages to allocate and use for that queue. Each @eqp
 14218 * entry indicates which event queue to bind the corresponding completion
 14219 * queue to. This function sends the CREATE_CQ_SET mailbox command to the HBA
 14220 * to set up the completion queues. The mailbox command is issued in polled
 14221 * mode, so this function waits for the command to finish before returning.
14222 *
14223 * On success this function will return a zero. If unable to allocate enough
14224 * memory this function will return -ENOMEM. If the queue create mailbox command
14225 * fails this function will return -ENXIO.
14226 **/
14227int
14228lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14229 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
14230{
14231 struct lpfc_queue *cq;
14232 struct lpfc_queue *eq;
14233 struct lpfc_mbx_cq_create_set *cq_set;
14234 struct lpfc_dmabuf *dmabuf;
14235 LPFC_MBOXQ_t *mbox;
14236 int rc, length, alloclen, status = 0;
14237 int cnt, idx, numcq, page_idx = 0;
14238 uint32_t shdr_status, shdr_add_status;
14239 union lpfc_sli4_cfg_shdr *shdr;
14240 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14241
14242 /* sanity check on queue memory */
14243 numcq = phba->cfg_nvmet_mrq;
14244 if (!cqp || !eqp || !numcq)
14245 return -ENODEV;
14246 if (!phba->sli4_hba.pc_sli4_params.supported)
14247 hw_page_size = SLI4_PAGE_SIZE;
14248
14249 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14250 if (!mbox)
14251 return -ENOMEM;
14252
14253 length = sizeof(struct lpfc_mbx_cq_create_set);
14254 length += ((numcq * cqp[0]->page_count) *
14255 sizeof(struct dma_address));
14256 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14257 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14258 LPFC_SLI4_MBX_NEMBED);
14259 if (alloclen < length) {
14260 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14261 "3098 Allocated DMA memory size (%d) is "
14262 "less than the requested DMA memory size "
14263 "(%d)\n", alloclen, length);
14264 status = -ENOMEM;
14265 goto out;
14266 }
14267 cq_set = mbox->sge_array->addr[0];
14268 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14269 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14270
14271 for (idx = 0; idx < numcq; idx++) {
14272 cq = cqp[idx];
14273 eq = eqp[idx];
14274 if (!cq || !eq) {
14275 status = -ENOMEM;
14276 goto out;
14277 }
14278
14279 switch (idx) {
14280 case 0:
14281 bf_set(lpfc_mbx_cq_create_set_page_size,
14282 &cq_set->u.request,
14283 (hw_page_size / SLI4_PAGE_SIZE));
14284 bf_set(lpfc_mbx_cq_create_set_num_pages,
14285 &cq_set->u.request, cq->page_count);
14286 bf_set(lpfc_mbx_cq_create_set_evt,
14287 &cq_set->u.request, 1);
14288 bf_set(lpfc_mbx_cq_create_set_valid,
14289 &cq_set->u.request, 1);
14290 bf_set(lpfc_mbx_cq_create_set_cqe_size,
14291 &cq_set->u.request, 0);
14292 bf_set(lpfc_mbx_cq_create_set_num_cq,
14293 &cq_set->u.request, numcq);
14294 switch (cq->entry_count) {
14295 default:
14296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14297 "3118 Bad CQ count. (%d)\n",
14298 cq->entry_count);
14299 if (cq->entry_count < 256) {
14300 status = -EINVAL;
14301 goto out;
14302 }
14303 /* otherwise default to smallest (drop thru) */
14304 case 256:
14305 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14306 &cq_set->u.request, LPFC_CQ_CNT_256);
14307 break;
14308 case 512:
14309 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14310 &cq_set->u.request, LPFC_CQ_CNT_512);
14311 break;
14312 case 1024:
14313 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14314 &cq_set->u.request, LPFC_CQ_CNT_1024);
14315 break;
14316 }
14317 bf_set(lpfc_mbx_cq_create_set_eq_id0,
14318 &cq_set->u.request, eq->queue_id);
14319 break;
14320 case 1:
14321 bf_set(lpfc_mbx_cq_create_set_eq_id1,
14322 &cq_set->u.request, eq->queue_id);
14323 break;
14324 case 2:
14325 bf_set(lpfc_mbx_cq_create_set_eq_id2,
14326 &cq_set->u.request, eq->queue_id);
14327 break;
14328 case 3:
14329 bf_set(lpfc_mbx_cq_create_set_eq_id3,
14330 &cq_set->u.request, eq->queue_id);
14331 break;
14332 case 4:
14333 bf_set(lpfc_mbx_cq_create_set_eq_id4,
14334 &cq_set->u.request, eq->queue_id);
14335 break;
14336 case 5:
14337 bf_set(lpfc_mbx_cq_create_set_eq_id5,
14338 &cq_set->u.request, eq->queue_id);
14339 break;
14340 case 6:
14341 bf_set(lpfc_mbx_cq_create_set_eq_id6,
14342 &cq_set->u.request, eq->queue_id);
14343 break;
14344 case 7:
14345 bf_set(lpfc_mbx_cq_create_set_eq_id7,
14346 &cq_set->u.request, eq->queue_id);
14347 break;
14348 case 8:
14349 bf_set(lpfc_mbx_cq_create_set_eq_id8,
14350 &cq_set->u.request, eq->queue_id);
14351 break;
14352 case 9:
14353 bf_set(lpfc_mbx_cq_create_set_eq_id9,
14354 &cq_set->u.request, eq->queue_id);
14355 break;
14356 case 10:
14357 bf_set(lpfc_mbx_cq_create_set_eq_id10,
14358 &cq_set->u.request, eq->queue_id);
14359 break;
14360 case 11:
14361 bf_set(lpfc_mbx_cq_create_set_eq_id11,
14362 &cq_set->u.request, eq->queue_id);
14363 break;
14364 case 12:
14365 bf_set(lpfc_mbx_cq_create_set_eq_id12,
14366 &cq_set->u.request, eq->queue_id);
14367 break;
14368 case 13:
14369 bf_set(lpfc_mbx_cq_create_set_eq_id13,
14370 &cq_set->u.request, eq->queue_id);
14371 break;
14372 case 14:
14373 bf_set(lpfc_mbx_cq_create_set_eq_id14,
14374 &cq_set->u.request, eq->queue_id);
14375 break;
14376 case 15:
14377 bf_set(lpfc_mbx_cq_create_set_eq_id15,
14378 &cq_set->u.request, eq->queue_id);
14379 break;
14380 }
14381
14382 /* link the cq onto the parent eq child list */
14383 list_add_tail(&cq->list, &eq->child_list);
14384 /* Set up completion queue's type and subtype */
14385 cq->type = type;
14386 cq->subtype = subtype;
14387 cq->assoc_qid = eq->queue_id;
14388 cq->host_index = 0;
14389 cq->hba_index = 0;
14390
14391 rc = 0;
14392 list_for_each_entry(dmabuf, &cq->page_list, list) {
14393 memset(dmabuf->virt, 0, hw_page_size);
14394 cnt = page_idx + dmabuf->buffer_tag;
14395 cq_set->u.request.page[cnt].addr_lo =
14396 putPaddrLow(dmabuf->phys);
14397 cq_set->u.request.page[cnt].addr_hi =
14398 putPaddrHigh(dmabuf->phys);
14399 rc++;
14400 }
14401 page_idx += rc;
14402 }
14403
14404 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14405
14406 /* The IOCTL status is embedded in the mailbox subheader. */
14407 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14408 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14409 if (shdr_status || shdr_add_status || rc) {
14410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14411 "3119 CQ_CREATE_SET mailbox failed with "
14412 "status x%x add_status x%x, mbx status x%x\n",
14413 shdr_status, shdr_add_status, rc);
14414 status = -ENXIO;
14415 goto out;
14416 }
14417 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
14418 if (rc == 0xFFFF) {
14419 status = -ENXIO;
14420 goto out;
14421 }
14422
14423 for (idx = 0; idx < numcq; idx++) {
14424 cq = cqp[idx];
14425 cq->queue_id = rc + idx;
14426 }
14427
14428out:
14429 lpfc_sli4_mbox_cmd_free(phba, mbox);
14430 return status;
14431}
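
/*
 * Usage sketch (illustrative): for NVMET MRQ the driver passes matching
 * arrays of phba->cfg_nvmet_mrq completion queues and event queues so that
 * they are created with a single CREATE_CQ_SET command.  The array names
 * and the LPFC_WCQ/LPFC_NVMET type/subtype are assumptions for this sketch.
 *
 *     struct lpfc_queue **cq_set;     (phba->cfg_nvmet_mrq entries)
 *     struct lpfc_queue **eq_set;     (one EQ per MRQ)
 *     int rc;
 *
 *     rc = lpfc_cq_create_set(phba, cq_set, eq_set, LPFC_WCQ, LPFC_NVMET);
 *     if (rc)
 *             return rc;
 */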
14432
14433/**
James Smartb19a0612010-04-06 14:48:51 -040014434 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
James Smart04c68492009-05-22 14:52:52 -040014435 * @phba: HBA structure that indicates port to create a queue on.
14436 * @mq: The queue structure to use to create the mailbox queue.
James Smartb19a0612010-04-06 14:48:51 -040014437 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 14438 * @cq: The completion queue to associate with this mailbox queue.
James Smart04c68492009-05-22 14:52:52 -040014439 *
James Smartb19a0612010-04-06 14:48:51 -040014440 * This function provides failback (fb) functionality when the
 14441 * mq_create_ext fails on older FW generations. Its purpose is otherwise
 14442 * identical to mq_create_ext.
James Smart04c68492009-05-22 14:52:52 -040014443 *
James Smartb19a0612010-04-06 14:48:51 -040014444 * This routine cannot fail as all attributes were previously accessed and
14445 * initialized in mq_create_ext.
James Smart04c68492009-05-22 14:52:52 -040014446 **/
James Smartb19a0612010-04-06 14:48:51 -040014447static void
14448lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
14449 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
James Smart04c68492009-05-22 14:52:52 -040014450{
14451 struct lpfc_mbx_mq_create *mq_create;
14452 struct lpfc_dmabuf *dmabuf;
James Smartb19a0612010-04-06 14:48:51 -040014453 int length;
James Smart04c68492009-05-22 14:52:52 -040014454
James Smart04c68492009-05-22 14:52:52 -040014455 length = (sizeof(struct lpfc_mbx_mq_create) -
14456 sizeof(struct lpfc_sli4_cfg_mhdr));
14457 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14458 LPFC_MBOX_OPCODE_MQ_CREATE,
14459 length, LPFC_SLI4_MBX_EMBED);
14460 mq_create = &mbox->u.mqe.un.mq_create;
14461 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
James Smartb19a0612010-04-06 14:48:51 -040014462 mq->page_count);
James Smart04c68492009-05-22 14:52:52 -040014463 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
James Smartb19a0612010-04-06 14:48:51 -040014464 cq->queue_id);
James Smart04c68492009-05-22 14:52:52 -040014465 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
14466 switch (mq->entry_count) {
James Smart04c68492009-05-22 14:52:52 -040014467 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050014468 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14469 LPFC_MQ_RING_SIZE_16);
James Smart04c68492009-05-22 14:52:52 -040014470 break;
14471 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050014472 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14473 LPFC_MQ_RING_SIZE_32);
James Smart04c68492009-05-22 14:52:52 -040014474 break;
14475 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050014476 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14477 LPFC_MQ_RING_SIZE_64);
James Smart04c68492009-05-22 14:52:52 -040014478 break;
14479 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050014480 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14481 LPFC_MQ_RING_SIZE_128);
James Smart04c68492009-05-22 14:52:52 -040014482 break;
14483 }
14484 list_for_each_entry(dmabuf, &mq->page_list, list) {
14485 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
James Smartb19a0612010-04-06 14:48:51 -040014486 putPaddrLow(dmabuf->phys);
James Smart04c68492009-05-22 14:52:52 -040014487 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smartb19a0612010-04-06 14:48:51 -040014488 putPaddrHigh(dmabuf->phys);
14489 }
14490}
14491
14492/**
14493 * lpfc_mq_create - Create a mailbox Queue on the HBA
14494 * @phba: HBA structure that indicates port to create a queue on.
14495 * @mq: The queue structure to use to create the mailbox queue.
 14496 * @cq: The completion queue to associate with this mailbox queue.
14497 * @subtype: The queue's subtype.
14498 *
14499 * This function creates a mailbox queue, as detailed in @mq, on a port,
14500 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
14501 *
 14502 * The @phba struct is used to send mailbox command to HBA. The @mq struct
14503 * is used to get the entry count and entry size that are necessary to
14504 * determine the number of pages to allocate and use for this queue. This
14505 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 14506 * mailbox queue. The mailbox command is issued in polled mode, so this
 14507 * function waits for the command to finish before returning.
14508 *
14509 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040014510 * memory this function will return -ENOMEM. If the queue create mailbox command
14511 * fails this function will return -ENXIO.
James Smartb19a0612010-04-06 14:48:51 -040014512 **/
14513int32_t
14514lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14515 struct lpfc_queue *cq, uint32_t subtype)
14516{
14517 struct lpfc_mbx_mq_create *mq_create;
14518 struct lpfc_mbx_mq_create_ext *mq_create_ext;
14519 struct lpfc_dmabuf *dmabuf;
14520 LPFC_MBOXQ_t *mbox;
14521 int rc, length, status = 0;
14522 uint32_t shdr_status, shdr_add_status;
14523 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040014524 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smartb19a0612010-04-06 14:48:51 -040014525
James Smart2e90f4b2011-12-13 13:22:37 -050014526 /* sanity check on queue memory */
14527 if (!mq || !cq)
14528 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040014529 if (!phba->sli4_hba.pc_sli4_params.supported)
14530 hw_page_size = SLI4_PAGE_SIZE;
James Smartb19a0612010-04-06 14:48:51 -040014531
14532 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14533 if (!mbox)
14534 return -ENOMEM;
14535 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
14536 sizeof(struct lpfc_sli4_cfg_mhdr));
14537 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14538 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
14539 length, LPFC_SLI4_MBX_EMBED);
14540
14541 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
James Smart5a6f1332011-03-11 16:05:35 -050014542 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
James Smart70f3c072010-12-15 17:57:33 -050014543 bf_set(lpfc_mbx_mq_create_ext_num_pages,
14544 &mq_create_ext->u.request, mq->page_count);
14545 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
14546 &mq_create_ext->u.request, 1);
14547 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
James Smartb19a0612010-04-06 14:48:51 -040014548 &mq_create_ext->u.request, 1);
14549 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
14550 &mq_create_ext->u.request, 1);
James Smart70f3c072010-12-15 17:57:33 -050014551 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
14552 &mq_create_ext->u.request, 1);
14553 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
14554 &mq_create_ext->u.request, 1);
James Smartb19a0612010-04-06 14:48:51 -040014555 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
James Smart5a6f1332011-03-11 16:05:35 -050014556 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14557 phba->sli4_hba.pc_sli4_params.mqv);
14558 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
14559 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
14560 cq->queue_id);
14561 else
14562 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
14563 cq->queue_id);
James Smartb19a0612010-04-06 14:48:51 -040014564 switch (mq->entry_count) {
14565 default:
14566 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14567 "0362 Unsupported MQ count. (%d)\n",
14568 mq->entry_count);
James Smart4f4c1862012-06-12 13:54:02 -040014569 if (mq->entry_count < 16) {
14570 status = -EINVAL;
14571 goto out;
14572 }
James Smartb19a0612010-04-06 14:48:51 -040014573 /* otherwise default to smallest count (drop through) */
14574 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050014575 bf_set(lpfc_mq_context_ring_size,
14576 &mq_create_ext->u.request.context,
14577 LPFC_MQ_RING_SIZE_16);
James Smartb19a0612010-04-06 14:48:51 -040014578 break;
14579 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050014580 bf_set(lpfc_mq_context_ring_size,
14581 &mq_create_ext->u.request.context,
14582 LPFC_MQ_RING_SIZE_32);
James Smartb19a0612010-04-06 14:48:51 -040014583 break;
14584 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050014585 bf_set(lpfc_mq_context_ring_size,
14586 &mq_create_ext->u.request.context,
14587 LPFC_MQ_RING_SIZE_64);
James Smartb19a0612010-04-06 14:48:51 -040014588 break;
14589 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050014590 bf_set(lpfc_mq_context_ring_size,
14591 &mq_create_ext->u.request.context,
14592 LPFC_MQ_RING_SIZE_128);
James Smartb19a0612010-04-06 14:48:51 -040014593 break;
14594 }
14595 list_for_each_entry(dmabuf, &mq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014596 memset(dmabuf->virt, 0, hw_page_size);
James Smartb19a0612010-04-06 14:48:51 -040014597 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
14598 putPaddrLow(dmabuf->phys);
14599 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smart04c68492009-05-22 14:52:52 -040014600 putPaddrHigh(dmabuf->phys);
14601 }
14602 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smartb19a0612010-04-06 14:48:51 -040014603 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14604 &mq_create_ext->u.response);
14605 if (rc != MBX_SUCCESS) {
14606 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14607 "2795 MQ_CREATE_EXT failed with "
14608 "status x%x. Failback to MQ_CREATE.\n",
14609 rc);
14610 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
14611 mq_create = &mbox->u.mqe.un.mq_create;
14612 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14613 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
14614 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14615 &mq_create->u.response);
14616 }
14617
James Smart04c68492009-05-22 14:52:52 -040014618 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart04c68492009-05-22 14:52:52 -040014619 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14620 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14621 if (shdr_status || shdr_add_status || rc) {
14622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14623 "2502 MQ_CREATE mailbox failed with "
14624 "status x%x add_status x%x, mbx status x%x\n",
14625 shdr_status, shdr_add_status, rc);
14626 status = -ENXIO;
14627 goto out;
14628 }
James Smart04c68492009-05-22 14:52:52 -040014629 if (mq->queue_id == 0xFFFF) {
14630 status = -ENXIO;
14631 goto out;
14632 }
14633 mq->type = LPFC_MQ;
James Smart2a622bf2011-02-16 12:40:06 -050014634 mq->assoc_qid = cq->queue_id;
James Smart04c68492009-05-22 14:52:52 -040014635 mq->subtype = subtype;
14636 mq->host_index = 0;
14637 mq->hba_index = 0;
14638
14639 /* link the mq onto the parent cq child list */
14640 list_add_tail(&mq->list, &cq->child_list);
14641out:
James Smart8fa38512009-07-19 10:01:03 -040014642 mempool_free(mbox, phba->mbox_mem_pool);
James Smart04c68492009-05-22 14:52:52 -040014643 return status;
14644}
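
/*
 * Usage sketch (illustrative): the mailbox queue is created against the
 * already-created mailbox completion queue.  LPFC_MBOX as the subtype
 * follows the driver's usage elsewhere but is an assumption in this sketch,
 * as are the local variable names.
 *
 *     struct lpfc_queue *mq;          (from lpfc_sli4_queue_alloc())
 *     struct lpfc_queue *cq;          (the mailbox CQ)
 *     int rc;
 *
 *     rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
 *     if (rc)
 *             return rc;
 */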
14645
14646/**
James Smart4f774512009-05-22 14:52:35 -040014647 * lpfc_wq_create - Create a Work Queue on the HBA
14648 * @phba: HBA structure that indicates port to create a queue on.
14649 * @wq: The queue structure to use to create the work queue.
14650 * @cq: The completion queue to bind this work queue to.
14651 * @subtype: The subtype of the work queue indicating its functionality.
14652 *
14653 * This function creates a work queue, as detailed in @wq, on a port, described
14654 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
14655 *
14656 * The @phba struct is used to send mailbox command to HBA. The @wq struct
14657 * is used to get the entry count and entry size that are necessary to
14658 * determine the number of pages to allocate and use for this queue. The @cq
14659 * is used to indicate which completion queue to bind this work queue to. This
14660 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 14661 * work queue. The mailbox command is issued in polled mode, so this
 14662 * function waits for the command to finish before returning.
14663 *
14664 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040014665 * memory this function will return -ENOMEM. If the queue create mailbox command
14666 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014667 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014668int
James Smart4f774512009-05-22 14:52:35 -040014669lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14670 struct lpfc_queue *cq, uint32_t subtype)
14671{
14672 struct lpfc_mbx_wq_create *wq_create;
14673 struct lpfc_dmabuf *dmabuf;
14674 LPFC_MBOXQ_t *mbox;
14675 int rc, length, status = 0;
14676 uint32_t shdr_status, shdr_add_status;
14677 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040014678 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart5a6f1332011-03-11 16:05:35 -050014679 struct dma_address *page;
James Smart962bc512013-01-03 15:44:00 -050014680 void __iomem *bar_memmap_p;
14681 uint32_t db_offset;
14682 uint16_t pci_barset;
James Smart49198b32010-04-06 15:04:33 -040014683
James Smart2e90f4b2011-12-13 13:22:37 -050014684 /* sanity check on queue memory */
14685 if (!wq || !cq)
14686 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040014687 if (!phba->sli4_hba.pc_sli4_params.supported)
14688 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040014689
14690 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14691 if (!mbox)
14692 return -ENOMEM;
14693 length = (sizeof(struct lpfc_mbx_wq_create) -
14694 sizeof(struct lpfc_sli4_cfg_mhdr));
14695 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14696 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
14697 length, LPFC_SLI4_MBX_EMBED);
14698 wq_create = &mbox->u.mqe.un.wq_create;
James Smart5a6f1332011-03-11 16:05:35 -050014699 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040014700 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
14701 wq->page_count);
14702 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
14703 cq->queue_id);
James Smart0c651872013-07-15 18:33:23 -040014704
14705 /* wqv is the earliest version supported, NOT the latest */
James Smart5a6f1332011-03-11 16:05:35 -050014706 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14707 phba->sli4_hba.pc_sli4_params.wqv);
James Smart962bc512013-01-03 15:44:00 -050014708
James Smart0c651872013-07-15 18:33:23 -040014709 switch (phba->sli4_hba.pc_sli4_params.wqv) {
14710 case LPFC_Q_CREATE_VERSION_0:
14711 switch (wq->entry_size) {
14712 default:
14713 case 64:
14714 /* Nothing to do, version 0 ONLY supports 64 byte */
14715 page = wq_create->u.request.page;
14716 break;
14717 case 128:
14718 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14719 LPFC_WQ_SZ128_SUPPORT)) {
14720 status = -ERANGE;
14721 goto out;
14722 }
14723 /* If we get here the HBA MUST also support V1 and
14724 * we MUST use it
14725 */
14726 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14727 LPFC_Q_CREATE_VERSION_1);
14728
14729 bf_set(lpfc_mbx_wq_create_wqe_count,
14730 &wq_create->u.request_1, wq->entry_count);
14731 bf_set(lpfc_mbx_wq_create_wqe_size,
14732 &wq_create->u.request_1,
14733 LPFC_WQ_WQE_SIZE_128);
14734 bf_set(lpfc_mbx_wq_create_page_size,
14735 &wq_create->u.request_1,
James Smart8ea73db2017-02-12 13:52:25 -080014736 LPFC_WQ_PAGE_SIZE_4096);
James Smart0c651872013-07-15 18:33:23 -040014737 page = wq_create->u.request_1.page;
14738 break;
14739 }
14740 break;
14741 case LPFC_Q_CREATE_VERSION_1:
James Smart5a6f1332011-03-11 16:05:35 -050014742 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
14743 wq->entry_count);
James Smart3f247de2017-04-21 16:04:56 -070014744 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14745 LPFC_Q_CREATE_VERSION_1);
14746
James Smart5a6f1332011-03-11 16:05:35 -050014747 switch (wq->entry_size) {
14748 default:
14749 case 64:
14750 bf_set(lpfc_mbx_wq_create_wqe_size,
14751 &wq_create->u.request_1,
14752 LPFC_WQ_WQE_SIZE_64);
14753 break;
14754 case 128:
James Smart0c651872013-07-15 18:33:23 -040014755 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14756 LPFC_WQ_SZ128_SUPPORT)) {
14757 status = -ERANGE;
14758 goto out;
14759 }
James Smart5a6f1332011-03-11 16:05:35 -050014760 bf_set(lpfc_mbx_wq_create_wqe_size,
14761 &wq_create->u.request_1,
14762 LPFC_WQ_WQE_SIZE_128);
14763 break;
14764 }
James Smart8ea73db2017-02-12 13:52:25 -080014765 bf_set(lpfc_mbx_wq_create_page_size,
14766 &wq_create->u.request_1,
14767 LPFC_WQ_PAGE_SIZE_4096);
James Smart5a6f1332011-03-11 16:05:35 -050014768 page = wq_create->u.request_1.page;
James Smart0c651872013-07-15 18:33:23 -040014769 break;
14770 default:
14771 status = -ERANGE;
14772 goto out;
James Smart5a6f1332011-03-11 16:05:35 -050014773 }
James Smart0c651872013-07-15 18:33:23 -040014774
James Smart4f774512009-05-22 14:52:35 -040014775 list_for_each_entry(dmabuf, &wq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014776 memset(dmabuf->virt, 0, hw_page_size);
James Smart5a6f1332011-03-11 16:05:35 -050014777 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
14778 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
James Smart4f774512009-05-22 14:52:35 -040014779 }
James Smart962bc512013-01-03 15:44:00 -050014780
14781 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14782 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
14783
James Smart4f774512009-05-22 14:52:35 -040014784 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14785 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040014786 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14787 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14788 if (shdr_status || shdr_add_status || rc) {
14789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14790 "2503 WQ_CREATE mailbox failed with "
14791 "status x%x add_status x%x, mbx status x%x\n",
14792 shdr_status, shdr_add_status, rc);
14793 status = -ENXIO;
14794 goto out;
14795 }
14796 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
14797 if (wq->queue_id == 0xFFFF) {
14798 status = -ENXIO;
14799 goto out;
14800 }
James Smart962bc512013-01-03 15:44:00 -050014801 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14802 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
14803 &wq_create->u.response);
14804 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
14805 (wq->db_format != LPFC_DB_RING_FORMAT)) {
14806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14807 "3265 WQ[%d] doorbell format not "
14808 "supported: x%x\n", wq->queue_id,
14809 wq->db_format);
14810 status = -EINVAL;
14811 goto out;
14812 }
14813 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
14814 &wq_create->u.response);
14815 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14816 if (!bar_memmap_p) {
14817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14818 "3263 WQ[%d] failed to memmap pci "
14819 "barset:x%x\n", wq->queue_id,
14820 pci_barset);
14821 status = -ENOMEM;
14822 goto out;
14823 }
14824 db_offset = wq_create->u.response.doorbell_offset;
14825 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
14826 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
14827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14828 "3252 WQ[%d] doorbell offset not "
14829 "supported: x%x\n", wq->queue_id,
14830 db_offset);
14831 status = -EINVAL;
14832 goto out;
14833 }
14834 wq->db_regaddr = bar_memmap_p + db_offset;
14835 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarta22e7db2013-04-17 20:16:37 -040014836 "3264 WQ[%d]: barset:x%x, offset:x%x, "
14837 "format:x%x\n", wq->queue_id, pci_barset,
14838 db_offset, wq->db_format);
James Smart962bc512013-01-03 15:44:00 -050014839 } else {
14840 wq->db_format = LPFC_DB_LIST_FORMAT;
14841 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
14842 }
James Smart895427b2017-02-12 13:52:30 -080014843 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
14844 if (wq->pring == NULL) {
14845 status = -ENOMEM;
14846 goto out;
14847 }
James Smart4f774512009-05-22 14:52:35 -040014848 wq->type = LPFC_WQ;
James Smart2a622bf2011-02-16 12:40:06 -050014849 wq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040014850 wq->subtype = subtype;
14851 wq->host_index = 0;
14852 wq->hba_index = 0;
James Smartff78d8f2011-12-13 13:21:35 -050014853 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
James Smart4f774512009-05-22 14:52:35 -040014854
14855 /* link the wq onto the parent cq child list */
14856 list_add_tail(&wq->list, &cq->child_list);
14857out:
James Smart8fa38512009-07-19 10:01:03 -040014858 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014859 return status;
14860}
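
/*
 * Usage sketch (illustrative): a work queue is created against the CQ that
 * will receive its completions; on success the WQ is linked onto
 * cq->child_list and wq->db_regaddr points at its doorbell.  The LPFC_FCP
 * subtype is an assumption for the example.
 *
 *     struct lpfc_queue *wq;          (from lpfc_sli4_queue_alloc())
 *     int rc;
 *
 *     rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
 *     if (rc)
 *             lpfc_sli4_queue_free(wq);
 */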
14861
14862/**
James Smart73d91e52011-10-10 21:32:10 -040014863 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
14864 * @phba: HBA structure that indicates port to create a queue on.
14865 * @rq: The queue structure to use for the receive queue.
14866 * @qno: The associated HBQ number
14867 *
14868 *
14869 * For SLI4 we need to adjust the RQ repost value based on
14870 * the number of buffers that are initially posted to the RQ.
14871 */
14872void
14873lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
14874{
14875 uint32_t cnt;
14876
James Smart2e90f4b2011-12-13 13:22:37 -050014877 /* sanity check on queue memory */
14878 if (!rq)
14879 return;
James Smart73d91e52011-10-10 21:32:10 -040014880 cnt = lpfc_hbq_defs[qno]->entry_count;
14881
14882 /* Recalc repost for RQs based on buffers initially posted */
14883 cnt = (cnt >> 3);
14884 if (cnt < LPFC_QUEUE_MIN_REPOST)
14885 cnt = LPFC_QUEUE_MIN_REPOST;
14886
14887 rq->entry_repost = cnt;
14888}
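
/*
 * Worked example (assumed values): if lpfc_hbq_defs[qno]->entry_count is
 * 512, the repost value becomes 512 >> 3 = 64.  An HBQ defined with fewer
 * than 8 * LPFC_QUEUE_MIN_REPOST entries is clamped up to
 * LPFC_QUEUE_MIN_REPOST instead.
 */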
14889
14890/**
James Smart4f774512009-05-22 14:52:35 -040014891 * lpfc_rq_create - Create a Receive Queue on the HBA
14892 * @phba: HBA structure that indicates port to create a queue on.
14893 * @hrq: The queue structure to use to create the header receive queue.
14894 * @drq: The queue structure to use to create the data receive queue.
14895 * @cq: The completion queue to bind this work queue to.
14896 *
 14897 * This function creates a receive buffer queue pair, as detailed in @hrq and
14898 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
14899 * to the HBA.
14900 *
14901 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 14902 * structs are used to get the entry count that is necessary to determine the
14903 * number of pages to use for this queue. The @cq is used to indicate which
14904 * completion queue to bind received buffers that are posted to these queues to.
14905 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 14906 * receive queue pair. The mailbox command is issued in polled mode, so this
 14907 * function waits for the command to finish before returning.
14908 *
14909 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040014910 * memory this function will return -ENOMEM. If the queue create mailbox command
14911 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014912 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014913int
James Smart4f774512009-05-22 14:52:35 -040014914lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14915 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
14916{
14917 struct lpfc_mbx_rq_create *rq_create;
14918 struct lpfc_dmabuf *dmabuf;
14919 LPFC_MBOXQ_t *mbox;
14920 int rc, length, status = 0;
14921 uint32_t shdr_status, shdr_add_status;
14922 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040014923 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart962bc512013-01-03 15:44:00 -050014924 void __iomem *bar_memmap_p;
14925 uint32_t db_offset;
14926 uint16_t pci_barset;
James Smart49198b32010-04-06 15:04:33 -040014927
James Smart2e90f4b2011-12-13 13:22:37 -050014928 /* sanity check on queue memory */
14929 if (!hrq || !drq || !cq)
14930 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040014931 if (!phba->sli4_hba.pc_sli4_params.supported)
14932 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040014933
14934 if (hrq->entry_count != drq->entry_count)
14935 return -EINVAL;
14936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14937 if (!mbox)
14938 return -ENOMEM;
14939 length = (sizeof(struct lpfc_mbx_rq_create) -
14940 sizeof(struct lpfc_sli4_cfg_mhdr));
14941 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14942 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14943 length, LPFC_SLI4_MBX_EMBED);
14944 rq_create = &mbox->u.mqe.un.rq_create;
James Smart5a6f1332011-03-11 16:05:35 -050014945 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14946 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14947 phba->sli4_hba.pc_sli4_params.rqv);
14948 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14949 bf_set(lpfc_rq_context_rqe_count_1,
14950 &rq_create->u.request.context,
14951 hrq->entry_count);
14952 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
James Smartc31098c2011-04-16 11:03:33 -040014953 bf_set(lpfc_rq_context_rqe_size,
14954 &rq_create->u.request.context,
14955 LPFC_RQE_SIZE_8);
14956 bf_set(lpfc_rq_context_page_size,
14957 &rq_create->u.request.context,
James Smart8ea73db2017-02-12 13:52:25 -080014958 LPFC_RQ_PAGE_SIZE_4096);
James Smart5a6f1332011-03-11 16:05:35 -050014959 } else {
14960 switch (hrq->entry_count) {
14961 default:
14962 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14963 "2535 Unsupported RQ count. (%d)\n",
14964 hrq->entry_count);
James Smart4f4c1862012-06-12 13:54:02 -040014965 if (hrq->entry_count < 512) {
14966 status = -EINVAL;
14967 goto out;
14968 }
James Smart5a6f1332011-03-11 16:05:35 -050014969 /* otherwise default to smallest count (drop through) */
14970 case 512:
14971 bf_set(lpfc_rq_context_rqe_count,
14972 &rq_create->u.request.context,
14973 LPFC_RQ_RING_SIZE_512);
14974 break;
14975 case 1024:
14976 bf_set(lpfc_rq_context_rqe_count,
14977 &rq_create->u.request.context,
14978 LPFC_RQ_RING_SIZE_1024);
14979 break;
14980 case 2048:
14981 bf_set(lpfc_rq_context_rqe_count,
14982 &rq_create->u.request.context,
14983 LPFC_RQ_RING_SIZE_2048);
14984 break;
14985 case 4096:
14986 bf_set(lpfc_rq_context_rqe_count,
14987 &rq_create->u.request.context,
14988 LPFC_RQ_RING_SIZE_4096);
14989 break;
14990 }
14991 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14992 LPFC_HDR_BUF_SIZE);
James Smart4f774512009-05-22 14:52:35 -040014993 }
14994 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14995 cq->queue_id);
14996 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14997 hrq->page_count);
James Smart4f774512009-05-22 14:52:35 -040014998 list_for_each_entry(dmabuf, &hrq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014999 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040015000 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15001 putPaddrLow(dmabuf->phys);
15002 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15003 putPaddrHigh(dmabuf->phys);
15004 }
James Smart962bc512013-01-03 15:44:00 -050015005 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15006 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15007
James Smart4f774512009-05-22 14:52:35 -040015008 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15009 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040015010 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15011 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15012 if (shdr_status || shdr_add_status || rc) {
15013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15014 "2504 RQ_CREATE mailbox failed with "
15015 "status x%x add_status x%x, mbx status x%x\n",
15016 shdr_status, shdr_add_status, rc);
15017 status = -ENXIO;
15018 goto out;
15019 }
15020 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15021 if (hrq->queue_id == 0xFFFF) {
15022 status = -ENXIO;
15023 goto out;
15024 }
James Smart962bc512013-01-03 15:44:00 -050015025
15026 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15027 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15028 &rq_create->u.response);
15029 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15030 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15032 "3262 RQ [%d] doorbell format not "
15033 "supported: x%x\n", hrq->queue_id,
15034 hrq->db_format);
15035 status = -EINVAL;
15036 goto out;
15037 }
15038
15039 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15040 &rq_create->u.response);
15041 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15042 if (!bar_memmap_p) {
15043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15044 "3269 RQ[%d] failed to memmap pci "
15045 "barset:x%x\n", hrq->queue_id,
15046 pci_barset);
15047 status = -ENOMEM;
15048 goto out;
15049 }
15050
15051 db_offset = rq_create->u.response.doorbell_offset;
15052 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15053 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15055 "3270 RQ[%d] doorbell offset not "
15056 "supported: x%x\n", hrq->queue_id,
15057 db_offset);
15058 status = -EINVAL;
15059 goto out;
15060 }
15061 hrq->db_regaddr = bar_memmap_p + db_offset;
15062 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarta22e7db2013-04-17 20:16:37 -040015063 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15064 "format:x%x\n", hrq->queue_id, pci_barset,
15065 db_offset, hrq->db_format);
James Smart962bc512013-01-03 15:44:00 -050015066 } else {
15067 hrq->db_format = LPFC_DB_RING_FORMAT;
15068 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15069 }
James Smart4f774512009-05-22 14:52:35 -040015070 hrq->type = LPFC_HRQ;
James Smart2a622bf2011-02-16 12:40:06 -050015071 hrq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040015072 hrq->subtype = subtype;
15073 hrq->host_index = 0;
15074 hrq->hba_index = 0;
15075
15076 /* now create the data queue */
15077 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15078 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15079 length, LPFC_SLI4_MBX_EMBED);
James Smart5a6f1332011-03-11 16:05:35 -050015080 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15081 phba->sli4_hba.pc_sli4_params.rqv);
15082 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15083 bf_set(lpfc_rq_context_rqe_count_1,
James Smartc31098c2011-04-16 11:03:33 -040015084 &rq_create->u.request.context, hrq->entry_count);
James Smart5a6f1332011-03-11 16:05:35 -050015085 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
James Smartc31098c2011-04-16 11:03:33 -040015086 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15087 LPFC_RQE_SIZE_8);
15088 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15089 (PAGE_SIZE/SLI4_PAGE_SIZE));
James Smart5a6f1332011-03-11 16:05:35 -050015090 } else {
15091 switch (drq->entry_count) {
15092 default:
15093 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15094 "2536 Unsupported RQ count. (%d)\n",
15095 drq->entry_count);
James Smart4f4c1862012-06-12 13:54:02 -040015096 if (drq->entry_count < 512) {
15097 status = -EINVAL;
15098 goto out;
15099 }
James Smart5a6f1332011-03-11 16:05:35 -050015100 /* otherwise default to smallest count (drop through) */
15101 case 512:
15102 bf_set(lpfc_rq_context_rqe_count,
15103 &rq_create->u.request.context,
15104 LPFC_RQ_RING_SIZE_512);
15105 break;
15106 case 1024:
15107 bf_set(lpfc_rq_context_rqe_count,
15108 &rq_create->u.request.context,
15109 LPFC_RQ_RING_SIZE_1024);
15110 break;
15111 case 2048:
15112 bf_set(lpfc_rq_context_rqe_count,
15113 &rq_create->u.request.context,
15114 LPFC_RQ_RING_SIZE_2048);
15115 break;
15116 case 4096:
15117 bf_set(lpfc_rq_context_rqe_count,
15118 &rq_create->u.request.context,
15119 LPFC_RQ_RING_SIZE_4096);
15120 break;
15121 }
15122 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15123 LPFC_DATA_BUF_SIZE);
James Smart4f774512009-05-22 14:52:35 -040015124 }
15125 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15126 cq->queue_id);
15127 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15128 drq->page_count);
James Smart4f774512009-05-22 14:52:35 -040015129 list_for_each_entry(dmabuf, &drq->page_list, list) {
15130 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15131 putPaddrLow(dmabuf->phys);
15132 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15133 putPaddrHigh(dmabuf->phys);
15134 }
James Smart962bc512013-01-03 15:44:00 -050015135 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15136 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
James Smart4f774512009-05-22 14:52:35 -040015137 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15138 /* The IOCTL status is embedded in the mailbox subheader. */
15139 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15140 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15141 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15142 if (shdr_status || shdr_add_status || rc) {
15143 status = -ENXIO;
15144 goto out;
15145 }
15146 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15147 if (drq->queue_id == 0xFFFF) {
15148 status = -ENXIO;
15149 goto out;
15150 }
15151 drq->type = LPFC_DRQ;
James Smart2a622bf2011-02-16 12:40:06 -050015152 drq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040015153 drq->subtype = subtype;
15154 drq->host_index = 0;
15155 drq->hba_index = 0;
15156
15157 /* link the header and data RQs onto the parent cq child list */
15158 list_add_tail(&hrq->list, &cq->child_list);
15159 list_add_tail(&drq->list, &cq->child_list);
15160
15161out:
James Smart8fa38512009-07-19 10:01:03 -040015162 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015163 return status;
15164}
15165
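/*
 * Illustrative sketch only (not part of the driver source): how a caller
 * might create one header/data RQ pair bound to a completion queue with the
 * routine above.  The lpfc_rq_create(phba, hrq, drq, cq, subtype) signature
 * and the LPFC_USOL subtype are assumptions based on typical lpfc usage;
 * hrq/drq/cq are assumed to have been allocated beforehand.
 */
#if 0
static int example_create_rq_pair(struct lpfc_hba *phba,
				  struct lpfc_queue *hrq,
				  struct lpfc_queue *drq,
				  struct lpfc_queue *cq)
{
	int rc;

	/* bind the header/data pair to the given CQ for unsolicited frames */
	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"xxxx example: RQ pair create failed %d\n", rc);
	return rc;
}
#endif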
15166/**
James Smart2d7dbc42017-02-12 13:52:35 -080015167 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15168 * @phba: HBA structure that indicates port to create a queue on.
15169 * @hrqp: The queue structure array to use to create the header receive queues.
15170 * @drqp: The queue structure array to use to create the data receive queues.
15171 * @cqp: The completion queue array to bind these receive queues to.
15172 *
15173 * This function creates a set of receive buffer queue pairs, as detailed in
15174 * @hrqp and @drqp, on a port described by @phba, by sending an RQ_CREATE
15175 * mailbox command to the HBA.
15176 *
15177 * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
15178 * and @drqp arrays are used to get the entry counts needed to determine the
15179 * number of pages to use for each queue. The @cqp array indicates which
15180 * completion queue to bind the received buffers posted to these queues to.
15181 * This function sends the RQ_CREATE mailbox command to the HBA to set up the
15182 * receive queue pairs. The mailbox is issued in polled mode, so this routine
15183 * waits for the command to finish before returning.
15184 *
15185 * On success this function will return a zero. If unable to allocate enough
15186 * memory this function will return -ENOMEM. If the queue create mailbox command
15187 * fails this function will return -ENXIO.
15188 **/
15189int
15190lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15191 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15192 uint32_t subtype)
15193{
15194 struct lpfc_queue *hrq, *drq, *cq;
15195 struct lpfc_mbx_rq_create_v2 *rq_create;
15196 struct lpfc_dmabuf *dmabuf;
15197 LPFC_MBOXQ_t *mbox;
15198 int rc, length, alloclen, status = 0;
15199 int cnt, idx, numrq, page_idx = 0;
15200 uint32_t shdr_status, shdr_add_status;
15201 union lpfc_sli4_cfg_shdr *shdr;
15202 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15203
15204 numrq = phba->cfg_nvmet_mrq;
15205 /* sanity check on array memory */
15206 if (!hrqp || !drqp || !cqp || !numrq)
15207 return -ENODEV;
15208 if (!phba->sli4_hba.pc_sli4_params.supported)
15209 hw_page_size = SLI4_PAGE_SIZE;
15210
15211 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15212 if (!mbox)
15213 return -ENOMEM;
15214
15215 length = sizeof(struct lpfc_mbx_rq_create_v2);
15216 length += ((2 * numrq * hrqp[0]->page_count) *
15217 sizeof(struct dma_address));
15218
15219 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15220 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15221 LPFC_SLI4_MBX_NEMBED);
15222 if (alloclen < length) {
15223 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15224 "3099 Allocated DMA memory size (%d) is "
15225 "less than the requested DMA memory size "
15226 "(%d)\n", alloclen, length);
15227 status = -ENOMEM;
15228 goto out;
15229 }
15230
15233 rq_create = mbox->sge_array->addr[0];
15234 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15235
15236 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15237 cnt = 0;
15238
15239 for (idx = 0; idx < numrq; idx++) {
15240 hrq = hrqp[idx];
15241 drq = drqp[idx];
15242 cq = cqp[idx];
15243
James Smart2d7dbc42017-02-12 13:52:35 -080015244 /* sanity check on queue memory */
15245 if (!hrq || !drq || !cq) {
15246 status = -ENODEV;
15247 goto out;
15248 }
15249
James Smart7aabe842017-03-04 09:30:22 -080015250 if (hrq->entry_count != drq->entry_count) {
15251 status = -EINVAL;
15252 goto out;
15253 }
15254
James Smart2d7dbc42017-02-12 13:52:35 -080015255 if (idx == 0) {
15256 bf_set(lpfc_mbx_rq_create_num_pages,
15257 &rq_create->u.request,
15258 hrq->page_count);
15259 bf_set(lpfc_mbx_rq_create_rq_cnt,
15260 &rq_create->u.request, (numrq * 2));
15261 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15262 1);
15263 bf_set(lpfc_rq_context_base_cq,
15264 &rq_create->u.request.context,
15265 cq->queue_id);
15266 bf_set(lpfc_rq_context_data_size,
15267 &rq_create->u.request.context,
15268 LPFC_DATA_BUF_SIZE);
15269 bf_set(lpfc_rq_context_hdr_size,
15270 &rq_create->u.request.context,
15271 LPFC_HDR_BUF_SIZE);
15272 bf_set(lpfc_rq_context_rqe_count_1,
15273 &rq_create->u.request.context,
15274 hrq->entry_count);
15275 bf_set(lpfc_rq_context_rqe_size,
15276 &rq_create->u.request.context,
15277 LPFC_RQE_SIZE_8);
15278 bf_set(lpfc_rq_context_page_size,
15279 &rq_create->u.request.context,
15280 (PAGE_SIZE/SLI4_PAGE_SIZE));
15281 }
15282 rc = 0;
15283 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15284 memset(dmabuf->virt, 0, hw_page_size);
15285 cnt = page_idx + dmabuf->buffer_tag;
15286 rq_create->u.request.page[cnt].addr_lo =
15287 putPaddrLow(dmabuf->phys);
15288 rq_create->u.request.page[cnt].addr_hi =
15289 putPaddrHigh(dmabuf->phys);
15290 rc++;
15291 }
15292 page_idx += rc;
15293
15294 rc = 0;
15295 list_for_each_entry(dmabuf, &drq->page_list, list) {
15296 memset(dmabuf->virt, 0, hw_page_size);
15297 cnt = page_idx + dmabuf->buffer_tag;
15298 rq_create->u.request.page[cnt].addr_lo =
15299 putPaddrLow(dmabuf->phys);
15300 rq_create->u.request.page[cnt].addr_hi =
15301 putPaddrHigh(dmabuf->phys);
15302 rc++;
15303 }
15304 page_idx += rc;
15305
15306 hrq->db_format = LPFC_DB_RING_FORMAT;
15307 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15308 hrq->type = LPFC_HRQ;
15309 hrq->assoc_qid = cq->queue_id;
15310 hrq->subtype = subtype;
15311 hrq->host_index = 0;
15312 hrq->hba_index = 0;
15313
15314 drq->db_format = LPFC_DB_RING_FORMAT;
15315 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15316 drq->type = LPFC_DRQ;
15317 drq->assoc_qid = cq->queue_id;
15318 drq->subtype = subtype;
15319 drq->host_index = 0;
15320 drq->hba_index = 0;
15321
15322 list_add_tail(&hrq->list, &cq->child_list);
15323 list_add_tail(&drq->list, &cq->child_list);
15324 }
15325
15326 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15327 /* The IOCTL status is embedded in the mailbox subheader. */
15328 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15329 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15330 if (shdr_status || shdr_add_status || rc) {
15331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15332 "3120 RQ_CREATE mailbox failed with "
15333 "status x%x add_status x%x, mbx status x%x\n",
15334 shdr_status, shdr_add_status, rc);
15335 status = -ENXIO;
15336 goto out;
15337 }
15338 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15339 if (rc == 0xFFFF) {
15340 status = -ENXIO;
15341 goto out;
15342 }
15343
15344 /* Initialize all RQs with associated queue id */
15345 for (idx = 0; idx < numrq; idx++) {
15346 hrq = hrqp[idx];
15347 hrq->queue_id = rc + (2 * idx);
15348 drq = drqp[idx];
15349 drq->queue_id = rc + (2 * idx) + 1;
15350 }
15351
15352out:
15353 lpfc_sli4_mbox_cmd_free(phba, mbox);
15354 return status;
15355}
15356
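/*
 * Illustrative sketch only: the queue IDs returned by RQ_CREATE for an MRQ
 * set are assigned in interleaved header/data pairs, mirroring the loop at
 * the end of lpfc_mrq_create() above.  For a base queue id of 100 and
 * numrq == 2 this yields hrq0=100, drq0=101, hrq1=102, drq1=103.
 */
#if 0
static void example_mrq_id_layout(struct lpfc_queue **hrqp,
				  struct lpfc_queue **drqp,
				  int numrq, uint16_t base_qid)
{
	int idx;

	for (idx = 0; idx < numrq; idx++) {
		hrqp[idx]->queue_id = base_qid + (2 * idx);
		drqp[idx]->queue_id = base_qid + (2 * idx) + 1;
	}
}
#endif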
15357/**
James Smart4f774512009-05-22 14:52:35 -040015358 * lpfc_eq_destroy - Destroy an event Queue on the HBA
15359 * @eq: The queue structure associated with the queue to destroy.
15360 *
15361 * This function destroys a queue, as detailed in @eq, by sending a mailbox
15362 * command, specific to the type of queue, to the HBA.
15363 *
15364 * The @eq struct is used to get the queue ID of the queue to destroy.
15365 *
15366 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040015367 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040015368 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015369int
James Smart4f774512009-05-22 14:52:35 -040015370lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
15371{
15372 LPFC_MBOXQ_t *mbox;
15373 int rc, length, status = 0;
15374 uint32_t shdr_status, shdr_add_status;
15375 union lpfc_sli4_cfg_shdr *shdr;
15376
James Smart2e90f4b2011-12-13 13:22:37 -050015377 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040015378 if (!eq)
15379 return -ENODEV;
15380 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
15381 if (!mbox)
15382 return -ENOMEM;
15383 length = (sizeof(struct lpfc_mbx_eq_destroy) -
15384 sizeof(struct lpfc_sli4_cfg_mhdr));
15385 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15386 LPFC_MBOX_OPCODE_EQ_DESTROY,
15387 length, LPFC_SLI4_MBX_EMBED);
15388 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
15389 eq->queue_id);
15390 mbox->vport = eq->phba->pport;
15391 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15392
15393 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
15394 /* The IOCTL status is embedded in the mailbox subheader. */
15395 shdr = (union lpfc_sli4_cfg_shdr *)
15396 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
15397 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15398 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15399 if (shdr_status || shdr_add_status || rc) {
15400 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15401 "2505 EQ_DESTROY mailbox failed with "
15402 "status x%x add_status x%x, mbx status x%x\n",
15403 shdr_status, shdr_add_status, rc);
15404 status = -ENXIO;
15405 }
15406
15407 /* Remove eq from any list */
15408 list_del_init(&eq->list);
James Smart8fa38512009-07-19 10:01:03 -040015409 mempool_free(mbox, eq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015410 return status;
15411}
15412
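/*
 * Illustrative sketch only: a teardown loop a caller might use to destroy a
 * set of event queues with the routine above.  The "eq_array"/"eq_count"
 * names are hypothetical placeholders, not actual lpfc fields.
 */
#if 0
static void example_destroy_eqs(struct lpfc_hba *phba,
				struct lpfc_queue **eq_array, int eq_count)
{
	int idx;

	for (idx = 0; idx < eq_count; idx++) {
		if (eq_array[idx])
			lpfc_eq_destroy(phba, eq_array[idx]);
	}
}
#endif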
15413/**
15414 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
15415 * @cq: The queue structure associated with the queue to destroy.
15416 *
15417 * This function destroys a queue, as detailed in @cq, by sending a mailbox
15418 * command, specific to the type of queue, to the HBA.
15419 *
15420 * The @cq struct is used to get the queue ID of the queue to destroy.
15421 *
15422 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040015423 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040015424 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015425int
James Smart4f774512009-05-22 14:52:35 -040015426lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
15427{
15428 LPFC_MBOXQ_t *mbox;
15429 int rc, length, status = 0;
15430 uint32_t shdr_status, shdr_add_status;
15431 union lpfc_sli4_cfg_shdr *shdr;
15432
James Smart2e90f4b2011-12-13 13:22:37 -050015433 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040015434 if (!cq)
15435 return -ENODEV;
15436 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
15437 if (!mbox)
15438 return -ENOMEM;
15439 length = (sizeof(struct lpfc_mbx_cq_destroy) -
15440 sizeof(struct lpfc_sli4_cfg_mhdr));
15441 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15442 LPFC_MBOX_OPCODE_CQ_DESTROY,
15443 length, LPFC_SLI4_MBX_EMBED);
15444 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
15445 cq->queue_id);
15446 mbox->vport = cq->phba->pport;
15447 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15448 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
15449 /* The IOCTL status is embedded in the mailbox subheader. */
15450 shdr = (union lpfc_sli4_cfg_shdr *)
15451 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
15452 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15453 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15454 if (shdr_status || shdr_add_status || rc) {
15455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15456 "2506 CQ_DESTROY mailbox failed with "
15457 "status x%x add_status x%x, mbx status x%x\n",
15458 shdr_status, shdr_add_status, rc);
15459 status = -ENXIO;
15460 }
15461 /* Remove cq from any list */
15462 list_del_init(&cq->list);
James Smart8fa38512009-07-19 10:01:03 -040015463 mempool_free(mbox, cq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015464 return status;
15465}
15466
15467/**
James Smart04c68492009-05-22 14:52:52 -040015468 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
15469 * @mq: The queue structure associated with the queue to destroy.
15470 *
15471 * This function destroys a queue, as detailed in @mq, by sending a mailbox
15472 * command, specific to the type of queue, to the HBA.
15473 *
15474 * The @mq struct is used to get the queue ID of the queue to destroy.
15475 *
15476 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040015477 * command fails this function will return -ENXIO.
James Smart04c68492009-05-22 14:52:52 -040015478 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015479int
James Smart04c68492009-05-22 14:52:52 -040015480lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
15481{
15482 LPFC_MBOXQ_t *mbox;
15483 int rc, length, status = 0;
15484 uint32_t shdr_status, shdr_add_status;
15485 union lpfc_sli4_cfg_shdr *shdr;
15486
James Smart2e90f4b2011-12-13 13:22:37 -050015487 /* sanity check on queue memory */
James Smart04c68492009-05-22 14:52:52 -040015488 if (!mq)
15489 return -ENODEV;
15490 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
15491 if (!mbox)
15492 return -ENOMEM;
15493 length = (sizeof(struct lpfc_mbx_mq_destroy) -
15494 sizeof(struct lpfc_sli4_cfg_mhdr));
15495 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15496 LPFC_MBOX_OPCODE_MQ_DESTROY,
15497 length, LPFC_SLI4_MBX_EMBED);
15498 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
15499 mq->queue_id);
15500 mbox->vport = mq->phba->pport;
15501 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15502 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
15503 /* The IOCTL status is embedded in the mailbox subheader. */
15504 shdr = (union lpfc_sli4_cfg_shdr *)
15505 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
15506 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15507 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15508 if (shdr_status || shdr_add_status || rc) {
15509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15510 "2507 MQ_DESTROY mailbox failed with "
15511 "status x%x add_status x%x, mbx status x%x\n",
15512 shdr_status, shdr_add_status, rc);
15513 status = -ENXIO;
15514 }
15515 /* Remove mq from any list */
15516 list_del_init(&mq->list);
James Smart8fa38512009-07-19 10:01:03 -040015517 mempool_free(mbox, mq->phba->mbox_mem_pool);
James Smart04c68492009-05-22 14:52:52 -040015518 return status;
15519}
15520
15521/**
James Smart4f774512009-05-22 14:52:35 -040015522 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
15523 * @wq: The queue structure associated with the queue to destroy.
15524 *
15525 * This function destroys a queue, as detailed in @wq, by sending a mailbox
15526 * command, specific to the type of queue, to the HBA.
15527 *
15528 * The @wq struct is used to get the queue ID of the queue to destroy.
15529 *
15530 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040015531 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040015532 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015533int
James Smart4f774512009-05-22 14:52:35 -040015534lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
15535{
15536 LPFC_MBOXQ_t *mbox;
15537 int rc, length, status = 0;
15538 uint32_t shdr_status, shdr_add_status;
15539 union lpfc_sli4_cfg_shdr *shdr;
15540
James Smart2e90f4b2011-12-13 13:22:37 -050015541 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040015542 if (!wq)
15543 return -ENODEV;
15544 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
15545 if (!mbox)
15546 return -ENOMEM;
15547 length = (sizeof(struct lpfc_mbx_wq_destroy) -
15548 sizeof(struct lpfc_sli4_cfg_mhdr));
15549 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15550 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
15551 length, LPFC_SLI4_MBX_EMBED);
15552 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
15553 wq->queue_id);
15554 mbox->vport = wq->phba->pport;
15555 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15556 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
15557 shdr = (union lpfc_sli4_cfg_shdr *)
15558 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
15559 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15560 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15561 if (shdr_status || shdr_add_status || rc) {
15562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15563 "2508 WQ_DESTROY mailbox failed with "
15564 "status x%x add_status x%x, mbx status x%x\n",
15565 shdr_status, shdr_add_status, rc);
15566 status = -ENXIO;
15567 }
15568 /* Remove wq from any list */
15569 list_del_init(&wq->list);
James Smartd1f525a2017-04-21 16:04:55 -070015570 kfree(wq->pring);
15571 wq->pring = NULL;
James Smart8fa38512009-07-19 10:01:03 -040015572 mempool_free(mbox, wq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015573 return status;
15574}
15575
15576/**
15577 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
15578 * @hrq: The header receive queue associated with the pair to destroy.
15579 * @drq: The data receive queue associated with the pair to destroy.
15580 *
15581 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
15582 * by sending mailbox commands, specific to the type of queue, to the HBA.
15583 * The @hrq and @drq structs are used to get the queue IDs to destroy.
15584 *
15585 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040015586 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040015587 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015588int
James Smart4f774512009-05-22 14:52:35 -040015589lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15590 struct lpfc_queue *drq)
15591{
15592 LPFC_MBOXQ_t *mbox;
15593 int rc, length, status = 0;
15594 uint32_t shdr_status, shdr_add_status;
15595 union lpfc_sli4_cfg_shdr *shdr;
15596
James Smart2e90f4b2011-12-13 13:22:37 -050015597 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040015598 if (!hrq || !drq)
15599 return -ENODEV;
15600 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
15601 if (!mbox)
15602 return -ENOMEM;
15603 length = (sizeof(struct lpfc_mbx_rq_destroy) -
James Smartfedd3b72011-02-16 12:39:24 -050015604 sizeof(struct lpfc_sli4_cfg_mhdr));
James Smart4f774512009-05-22 14:52:35 -040015605 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15606 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
15607 length, LPFC_SLI4_MBX_EMBED);
15608 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
15609 hrq->queue_id);
15610 mbox->vport = hrq->phba->pport;
15611 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15612 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
15613 /* The IOCTL status is embedded in the mailbox subheader. */
15614 shdr = (union lpfc_sli4_cfg_shdr *)
15615 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
15616 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15617 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15618 if (shdr_status || shdr_add_status || rc) {
15619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15620 "2509 RQ_DESTROY mailbox failed with "
15621 "status x%x add_status x%x, mbx status x%x\n",
15622 shdr_status, shdr_add_status, rc);
15623 if (rc != MBX_TIMEOUT)
15624 mempool_free(mbox, hrq->phba->mbox_mem_pool);
15625 return -ENXIO;
15626 }
15627 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
15628 drq->queue_id);
15629 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
15630 shdr = (union lpfc_sli4_cfg_shdr *)
15631 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
15632 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15633 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15634 if (shdr_status || shdr_add_status || rc) {
15635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15636 "2510 RQ_DESTROY mailbox failed with "
15637 "status x%x add_status x%x, mbx status x%x\n",
15638 shdr_status, shdr_add_status, rc);
15639 status = -ENXIO;
15640 }
15641 list_del_init(&hrq->list);
15642 list_del_init(&drq->list);
James Smart8fa38512009-07-19 10:01:03 -040015643 mempool_free(mbox, hrq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015644 return status;
15645}
15646
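/*
 * Illustrative sketch only: tearing down a header/data RQ pair.  The pair is
 * destroyed with a single call, and the child queues are destroyed before
 * the completion queue they were bound to (ordering assumed from the
 * child_list linkage set up at create time).
 */
#if 0
static void example_destroy_rq_pair(struct lpfc_hba *phba,
				    struct lpfc_queue *hrq,
				    struct lpfc_queue *drq,
				    struct lpfc_queue *cq)
{
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_cq_destroy(phba, cq);
}
#endif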
15647/**
15648 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
15649 * @phba: The HBA for which this call is being executed.
15650 * @pdma_phys_addr0: Physical address of the 1st SGL page.
15651 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
15652 * @xritag: the xritag that ties this io to the SGL pages.
15653 *
15654 * This routine will post the sgl pages for the IO that has the xritag
15655 * that is in the iocbq structure. The xritag is assigned during iocbq
15656 * creation and persists for as long as the driver is loaded.
15657 * If the caller has fewer than 256 scatter gather segments to map, then
15658 * pdma_phys_addr1 should be 0.
15659 * If the caller needs to map more than 256 scatter gather segments, then
15660 * pdma_phys_addr1 should be a valid physical address.
15661 * Physical addresses for SGLs must be 64 byte aligned.
15662 * If two SGL pages are mapped, the first one must have 256 entries and
15663 * the second can have between 1 and 256 entries.
15664 *
15665 * Return codes:
15666 * 0 - Success
15667 * -ENXIO, -ENOMEM - Failure
15668 **/
15669int
15670lpfc_sli4_post_sgl(struct lpfc_hba *phba,
15671 dma_addr_t pdma_phys_addr0,
15672 dma_addr_t pdma_phys_addr1,
15673 uint16_t xritag)
15674{
15675 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
15676 LPFC_MBOXQ_t *mbox;
15677 int rc;
15678 uint32_t shdr_status, shdr_add_status;
James Smart6d368e52011-05-24 11:44:12 -040015679 uint32_t mbox_tmo;
James Smart4f774512009-05-22 14:52:35 -040015680 union lpfc_sli4_cfg_shdr *shdr;
15681
15682 if (xritag == NO_XRI) {
15683 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15684 "0364 Invalid param:\n");
15685 return -EINVAL;
15686 }
15687
15688 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15689 if (!mbox)
15690 return -ENOMEM;
15691
15692 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15693 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
15694 sizeof(struct lpfc_mbx_post_sgl_pages) -
James Smartfedd3b72011-02-16 12:39:24 -050015695 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
James Smart4f774512009-05-22 14:52:35 -040015696
15697 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
15698 &mbox->u.mqe.un.post_sgl_pages;
15699 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
15700 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
15701
15702 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
15703 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
15704 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
15705 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
15706
15707 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
15708 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
15709 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
15710 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
15711 if (!phba->sli4_hba.intr_enable)
15712 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smart6d368e52011-05-24 11:44:12 -040015713 else {
James Smarta183a152011-10-10 21:32:43 -040015714 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -040015715 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15716 }
James Smart4f774512009-05-22 14:52:35 -040015717 /* The IOCTL status is embedded in the mailbox subheader. */
15718 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
15719 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15720 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15721 if (rc != MBX_TIMEOUT)
15722 mempool_free(mbox, phba->mbox_mem_pool);
15723 if (shdr_status || shdr_add_status || rc) {
15724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15725 "2511 POST_SGL mailbox failed with "
15726 "status x%x add_status x%x, mbx status x%x\n",
15727 shdr_status, shdr_add_status, rc);
James Smart4f774512009-05-22 14:52:35 -040015728 }
15729 return 0;
15730}
James Smart4f774512009-05-22 14:52:35 -040015731
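/*
 * Illustrative sketch only: posting a single SGL page for an xri with the
 * routine above.  When fewer than 256 scatter gather entries are needed the
 * second page address is simply 0, per the kernel-doc above.
 */
#if 0
static int example_post_one_sgl_page(struct lpfc_hba *phba,
				     dma_addr_t sgl_phys, uint16_t xritag)
{
	/* only one SGL page is mapped, so pdma_phys_addr1 is 0 */
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
#endif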
15732/**
James Smart88a2cfb2011-07-22 18:36:33 -040015733 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
James Smart6d368e52011-05-24 11:44:12 -040015734 * @phba: pointer to lpfc hba data structure.
15735 *
15736 * This routine is invoked to allocate the next available xri from the
James Smart88a2cfb2011-07-22 18:36:33 -040015737 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
15738 * xri is a logical index; the search always starts at index 0 and the
15739 * first free bit found is marked as in use.
James Smart6d368e52011-05-24 11:44:12 -040015740 *
James Smart88a2cfb2011-07-22 18:36:33 -040015741 * Returns
15742 * An xri in the range 0 <= xri < max_xri if successful,
15743 * NO_XRI if no xris are available.
15744 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040015745static uint16_t
James Smart6d368e52011-05-24 11:44:12 -040015746lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
15747{
15748 unsigned long xri;
15749
15750 /*
15751 * Fetch the next logical xri. Because this index is logical,
15752 * the driver starts at 0 each time.
15753 */
15754 spin_lock_irq(&phba->hbalock);
15755 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
15756 phba->sli4_hba.max_cfg_param.max_xri, 0);
15757 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
15758 spin_unlock_irq(&phba->hbalock);
15759 return NO_XRI;
15760 } else {
15761 set_bit(xri, phba->sli4_hba.xri_bmask);
15762 phba->sli4_hba.max_cfg_param.xri_used++;
James Smart6d368e52011-05-24 11:44:12 -040015763 }
James Smart6d368e52011-05-24 11:44:12 -040015764 spin_unlock_irq(&phba->hbalock);
15765 return xri;
15766}
15767
15768/**
15769 * __lpfc_sli4_free_xri - Release an xri for reuse.
15770 * @phba: pointer to lpfc hba data structure.
15771 *
15772 * This routine is invoked to release an xri back to the pool of
15773 * available xris maintained by the driver. The caller must hold the hbalock.
15774 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040015775static void
James Smart6d368e52011-05-24 11:44:12 -040015776__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15777{
15778 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
James Smart6d368e52011-05-24 11:44:12 -040015779 phba->sli4_hba.max_cfg_param.xri_used--;
15780 }
15781}
15782
15783/**
15784 * lpfc_sli4_free_xri - Release an xri for reuse.
15785 * @phba: pointer to lpfc hba data structure.
15786 *
15787 * This routine is invoked to release an xri to the pool of
15788 * available xris maintained by the driver.
15789 **/
15790void
15791lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15792{
15793 spin_lock_irq(&phba->hbalock);
15794 __lpfc_sli4_free_xri(phba, xri);
15795 spin_unlock_irq(&phba->hbalock);
15796}
15797
15798/**
James Smart4f774512009-05-22 14:52:35 -040015799 * lpfc_sli4_next_xritag - Get an xritag for the io
15800 * @phba: Pointer to HBA context object.
15801 *
15802 * This function gets an xritag for the iocb. If there is no unused xritag
15803 * it will return NO_XRI (0xffff).
15804 * The function returns the allocated xritag if successful, else returns
15805 * NO_XRI; NO_XRI is not a valid xritag.
15806 * The caller is not required to hold any lock.
15807 **/
15808uint16_t
15809lpfc_sli4_next_xritag(struct lpfc_hba *phba)
15810{
James Smart6d368e52011-05-24 11:44:12 -040015811 uint16_t xri_index;
James Smart4f774512009-05-22 14:52:35 -040015812
James Smart6d368e52011-05-24 11:44:12 -040015813 xri_index = lpfc_sli4_alloc_xri(phba);
James Smart81378052012-05-09 21:17:37 -040015814 if (xri_index == NO_XRI)
15815 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15816 "2004 Failed to allocate XRI.last XRITAG is %d"
15817 " Max XRI is %d, Used XRI is %d\n",
15818 xri_index,
15819 phba->sli4_hba.max_cfg_param.max_xri,
15820 phba->sli4_hba.max_cfg_param.xri_used);
15821 return xri_index;
James Smart4f774512009-05-22 14:52:35 -040015822}
15823
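/*
 * Illustrative sketch only: pairing lpfc_sli4_next_xritag() with
 * lpfc_sli4_free_xri().  NO_XRI indicates the xri pool is exhausted.
 */
#if 0
static int example_with_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	xri = lpfc_sli4_next_xritag(phba);
	if (xri == NO_XRI)
		return -ENOMEM;

	/* ... use the xri for an exchange ... */

	lpfc_sli4_free_xri(phba, xri);
	return 0;
}
#endif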
15824/**
James Smart895427b2017-02-12 13:52:30 -080015825 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
James Smart4f774512009-05-22 14:52:35 -040015826 * @phba: pointer to lpfc hba data structure.
James Smart8a9d2e82012-05-09 21:16:12 -040015827 * @post_sgl_list: pointer to els sgl entry list.
15828 * @count: number of els sgl entries on the list.
James Smart4f774512009-05-22 14:52:35 -040015829 *
15830 * This routine is invoked to post a block of the driver's sgl pages to the
15831 * HBA using a non-embedded mailbox command. No lock is held. This routine
15832 * is only called when the driver is loading and after all IO has been
15833 * stopped.
15834 **/
James Smart8a9d2e82012-05-09 21:16:12 -040015835static int
James Smart895427b2017-02-12 13:52:30 -080015836lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
James Smart8a9d2e82012-05-09 21:16:12 -040015837 struct list_head *post_sgl_list,
15838 int post_cnt)
James Smart4f774512009-05-22 14:52:35 -040015839{
James Smart8a9d2e82012-05-09 21:16:12 -040015840 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
James Smart4f774512009-05-22 14:52:35 -040015841 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15842 struct sgl_page_pairs *sgl_pg_pairs;
15843 void *viraddr;
15844 LPFC_MBOXQ_t *mbox;
15845 uint32_t reqlen, alloclen, pg_pairs;
15846 uint32_t mbox_tmo;
James Smart8a9d2e82012-05-09 21:16:12 -040015847 uint16_t xritag_start = 0;
15848 int rc = 0;
James Smart4f774512009-05-22 14:52:35 -040015849 uint32_t shdr_status, shdr_add_status;
15850 union lpfc_sli4_cfg_shdr *shdr;
15851
James Smart895427b2017-02-12 13:52:30 -080015852 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
James Smart4f774512009-05-22 14:52:35 -040015853 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
James Smart49198b32010-04-06 15:04:33 -040015854 if (reqlen > SLI4_PAGE_SIZE) {
James Smart895427b2017-02-12 13:52:30 -080015855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart4f774512009-05-22 14:52:35 -040015856 "2559 Block sgl registration required DMA "
15857 "size (%d) great than a page\n", reqlen);
15858 return -ENOMEM;
15859 }
James Smart895427b2017-02-12 13:52:30 -080015860
James Smart4f774512009-05-22 14:52:35 -040015861 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
James Smart6d368e52011-05-24 11:44:12 -040015862 if (!mbox)
James Smart4f774512009-05-22 14:52:35 -040015863 return -ENOMEM;
James Smart4f774512009-05-22 14:52:35 -040015864
15865 /* Allocate DMA memory and set up the non-embedded mailbox command */
15866 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15867 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15868 LPFC_SLI4_MBX_NEMBED);
15869
15870 if (alloclen < reqlen) {
15871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15872 "0285 Allocated DMA memory size (%d) is "
15873 "less than the requested DMA memory "
15874 "size (%d)\n", alloclen, reqlen);
15875 lpfc_sli4_mbox_cmd_free(phba, mbox);
15876 return -ENOMEM;
15877 }
James Smart4f774512009-05-22 14:52:35 -040015878 /* Set up the SGL pages in the non-embedded DMA pages */
James Smart6d368e52011-05-24 11:44:12 -040015879 viraddr = mbox->sge_array->addr[0];
James Smart4f774512009-05-22 14:52:35 -040015880 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15881 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15882
James Smart8a9d2e82012-05-09 21:16:12 -040015883 pg_pairs = 0;
15884 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
James Smart4f774512009-05-22 14:52:35 -040015885 /* Set up the sge entry */
15886 sgl_pg_pairs->sgl_pg0_addr_lo =
15887 cpu_to_le32(putPaddrLow(sglq_entry->phys));
15888 sgl_pg_pairs->sgl_pg0_addr_hi =
15889 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
15890 sgl_pg_pairs->sgl_pg1_addr_lo =
15891 cpu_to_le32(putPaddrLow(0));
15892 sgl_pg_pairs->sgl_pg1_addr_hi =
15893 cpu_to_le32(putPaddrHigh(0));
James Smart6d368e52011-05-24 11:44:12 -040015894
James Smart4f774512009-05-22 14:52:35 -040015895 /* Keep the first xritag on the list */
15896 if (pg_pairs == 0)
15897 xritag_start = sglq_entry->sli4_xritag;
15898 sgl_pg_pairs++;
James Smart8a9d2e82012-05-09 21:16:12 -040015899 pg_pairs++;
James Smart4f774512009-05-22 14:52:35 -040015900 }
James Smart6d368e52011-05-24 11:44:12 -040015901
15902 /* Complete initialization and perform endian conversion. */
James Smart4f774512009-05-22 14:52:35 -040015903 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
James Smart895427b2017-02-12 13:52:30 -080015904 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
James Smart4f774512009-05-22 14:52:35 -040015905 sgl->word0 = cpu_to_le32(sgl->word0);
James Smart895427b2017-02-12 13:52:30 -080015906
James Smart4f774512009-05-22 14:52:35 -040015907 if (!phba->sli4_hba.intr_enable)
15908 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15909 else {
James Smarta183a152011-10-10 21:32:43 -040015910 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart4f774512009-05-22 14:52:35 -040015911 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15912 }
15913 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
15914 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15915 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15916 if (rc != MBX_TIMEOUT)
15917 lpfc_sli4_mbox_cmd_free(phba, mbox);
15918 if (shdr_status || shdr_add_status || rc) {
15919 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15920 "2513 POST_SGL_BLOCK mailbox command failed "
15921 "status x%x add_status x%x mbx status x%x\n",
15922 shdr_status, shdr_add_status, rc);
15923 rc = -ENXIO;
15924 }
15925 return rc;
15926}
15927
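/*
 * Illustrative sketch only: the block-post routines above and below reject
 * requests whose DMA length would exceed SLI4_PAGE_SIZE, so the largest
 * batch a caller can post in one non-embedded mailbox works out to roughly
 * the value computed here (derived from the reqlen calculation above).
 */
#if 0
static inline uint32_t example_max_sgl_post_cnt(void)
{
	return (SLI4_PAGE_SIZE -
		sizeof(union lpfc_sli4_cfg_shdr) - sizeof(uint32_t)) /
		sizeof(struct sgl_page_pairs);
}
#endif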
15928/**
15929 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
15930 * @phba: pointer to lpfc hba data structure.
15931 * @sblist: pointer to scsi buffer list.
15932 * @count: number of scsi buffers on the list.
15933 *
15934 * This routine is invoked to post a block of @count scsi sgl pages from a
15935 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
15936 * No Lock is held.
15937 *
15938 **/
15939int
James Smart8a9d2e82012-05-09 21:16:12 -040015940lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
15941 struct list_head *sblist,
15942 int count)
James Smart4f774512009-05-22 14:52:35 -040015943{
15944 struct lpfc_scsi_buf *psb;
15945 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15946 struct sgl_page_pairs *sgl_pg_pairs;
15947 void *viraddr;
15948 LPFC_MBOXQ_t *mbox;
15949 uint32_t reqlen, alloclen, pg_pairs;
15950 uint32_t mbox_tmo;
15951 uint16_t xritag_start = 0;
15952 int rc = 0;
15953 uint32_t shdr_status, shdr_add_status;
15954 dma_addr_t pdma_phys_bpl1;
15955 union lpfc_sli4_cfg_shdr *shdr;
15956
15957 /* Calculate the requested length of the dma memory */
James Smart8a9d2e82012-05-09 21:16:12 -040015958 reqlen = count * sizeof(struct sgl_page_pairs) +
James Smart4f774512009-05-22 14:52:35 -040015959 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
James Smart49198b32010-04-06 15:04:33 -040015960 if (reqlen > SLI4_PAGE_SIZE) {
James Smart4f774512009-05-22 14:52:35 -040015961 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
15962 "0217 Block sgl registration required DMA "
15963 "size (%d) great than a page\n", reqlen);
15964 return -ENOMEM;
15965 }
15966 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15967 if (!mbox) {
15968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15969 "0283 Failed to allocate mbox cmd memory\n");
15970 return -ENOMEM;
15971 }
15972
15973 /* Allocate DMA memory and set up the non-embedded mailbox command */
15974 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15975 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15976 LPFC_SLI4_MBX_NEMBED);
15977
15978 if (alloclen < reqlen) {
15979 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15980 "2561 Allocated DMA memory size (%d) is "
15981 "less than the requested DMA memory "
15982 "size (%d)\n", alloclen, reqlen);
15983 lpfc_sli4_mbox_cmd_free(phba, mbox);
15984 return -ENOMEM;
15985 }
James Smart6d368e52011-05-24 11:44:12 -040015986
James Smart4f774512009-05-22 14:52:35 -040015987 /* Get the first SGE entry from the non-embedded DMA memory */
James Smart4f774512009-05-22 14:52:35 -040015988 viraddr = mbox->sge_array->addr[0];
15989
15990 /* Set up the SGL pages in the non-embedded DMA pages */
15991 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15992 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15993
15994 pg_pairs = 0;
15995 list_for_each_entry(psb, sblist, list) {
15996 /* Set up the sge entry */
15997 sgl_pg_pairs->sgl_pg0_addr_lo =
15998 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
15999 sgl_pg_pairs->sgl_pg0_addr_hi =
16000 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
16001 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16002 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
16003 else
16004 pdma_phys_bpl1 = 0;
16005 sgl_pg_pairs->sgl_pg1_addr_lo =
16006 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16007 sgl_pg_pairs->sgl_pg1_addr_hi =
16008 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16009 /* Keep the first xritag on the list */
16010 if (pg_pairs == 0)
16011 xritag_start = psb->cur_iocbq.sli4_xritag;
16012 sgl_pg_pairs++;
16013 pg_pairs++;
16014 }
16015 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16016 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16017 /* Perform endian conversion if necessary */
16018 sgl->word0 = cpu_to_le32(sgl->word0);
16019
16020 if (!phba->sli4_hba.intr_enable)
16021 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16022 else {
James Smarta183a152011-10-10 21:32:43 -040016023 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart4f774512009-05-22 14:52:35 -040016024 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16025 }
16026 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16027 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16028 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16029 if (rc != MBX_TIMEOUT)
16030 lpfc_sli4_mbox_cmd_free(phba, mbox);
16031 if (shdr_status || shdr_add_status || rc) {
16032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16033 "2564 POST_SGL_BLOCK mailbox command failed "
16034 "status x%x add_status x%x mbx status x%x\n",
16035 shdr_status, shdr_add_status, rc);
16036 rc = -ENXIO;
16037 }
16038 return rc;
16039}
16040
James Smart2ea259e2017-02-12 13:52:27 -080016041static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
16042static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;
16043
James Smart4f774512009-05-22 14:52:35 -040016044/**
16045 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16046 * @phba: pointer to lpfc_hba struct that the frame was received on
16047 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16048 *
16049 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16050 * valid type of frame that the LPFC driver will handle. This function will
16051 * return a zero if the frame is a valid frame or a non zero value when the
16052 * frame does not pass the check.
16053 **/
16054static int
16055lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16056{
Tomas Henzl474ffb72010-12-22 16:52:40 +010016057 /* rctl/type name tables are file-scope static to save stack space */
James Smart4f774512009-05-22 14:52:35 -040016058 struct fc_vft_header *fc_vft_hdr;
James Smart546fc852011-03-11 16:06:29 -050016059 uint32_t *header = (uint32_t *) fc_hdr;
James Smart4f774512009-05-22 14:52:35 -040016060
16061 switch (fc_hdr->fh_r_ctl) {
16062 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16063 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16064 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16065 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16066 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16067 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16068 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16069 case FC_RCTL_DD_CMD_STATUS: /* command status */
16070 case FC_RCTL_ELS_REQ: /* extended link services request */
16071 case FC_RCTL_ELS_REP: /* extended link services reply */
16072 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16073 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16074 case FC_RCTL_BA_NOP: /* basic link service NOP */
16075 case FC_RCTL_BA_ABTS: /* basic link service abort */
16076 case FC_RCTL_BA_RMC: /* remove connection */
16077 case FC_RCTL_BA_ACC: /* basic accept */
16078 case FC_RCTL_BA_RJT: /* basic reject */
16079 case FC_RCTL_BA_PRMT:
16080 case FC_RCTL_ACK_1: /* acknowledge_1 */
16081 case FC_RCTL_ACK_0: /* acknowledge_0 */
16082 case FC_RCTL_P_RJT: /* port reject */
16083 case FC_RCTL_F_RJT: /* fabric reject */
16084 case FC_RCTL_P_BSY: /* port busy */
16085 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16086 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16087 case FC_RCTL_LCR: /* link credit reset */
16088 case FC_RCTL_END: /* end */
16089 break;
16090 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
16091 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16092 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16093 return lpfc_fc_frame_check(phba, fc_hdr);
16094 default:
16095 goto drop;
16096 }
16097 switch (fc_hdr->fh_type) {
16098 case FC_TYPE_BLS:
16099 case FC_TYPE_ELS:
16100 case FC_TYPE_FCP:
16101 case FC_TYPE_CT:
James Smart895427b2017-02-12 13:52:30 -080016102 case FC_TYPE_NVME:
James Smart4f774512009-05-22 14:52:35 -040016103 break;
16104 case FC_TYPE_IP:
16105 case FC_TYPE_ILS:
16106 default:
16107 goto drop;
16108 }
James Smart546fc852011-03-11 16:06:29 -050016109
James Smart4f774512009-05-22 14:52:35 -040016110 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smart88f43a02013-04-17 20:19:44 -040016111 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
16112 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
James Smart2ea259e2017-02-12 13:52:27 -080016113 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
16114 lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
James Smart546fc852011-03-11 16:06:29 -050016115 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16116 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
James Smart88f43a02013-04-17 20:19:44 -040016117 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16118 be32_to_cpu(header[6]));
James Smart4f774512009-05-22 14:52:35 -040016119 return 0;
16120drop:
16121 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16122 "2539 Dropped frame rctl:%s type:%s\n",
James Smart2ea259e2017-02-12 13:52:27 -080016123 lpfc_rctl_names[fc_hdr->fh_r_ctl],
16124 lpfc_type_names[fc_hdr->fh_type]);
James Smart4f774512009-05-22 14:52:35 -040016125 return 1;
16126}
16127
16128/**
16129 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16130 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16131 *
16132 * This function processes the FC header to retrieve the VFI from the VF
16133 * header, if one exists. This function will return the VFI if one exists
16134 * or 0 if no VSAN Header exists.
16135 **/
16136static uint32_t
16137lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16138{
16139 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16140
16141 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
16142 return 0;
16143 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
16144}
16145
16146/**
16147 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
16148 * @phba: Pointer to the HBA structure to search for the vport on
16149 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16150 * @fcfi: The FC Fabric ID that the frame came from
16151 *
16152 * This function searches the @phba for a vport that matches the content of the
16153 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
16154 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
16155 * returns the matching vport pointer or NULL if unable to match frame to a
16156 * vport.
16157 **/
16158static struct lpfc_vport *
16159lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
James Smart895427b2017-02-12 13:52:30 -080016160 uint16_t fcfi, uint32_t did)
James Smart4f774512009-05-22 14:52:35 -040016161{
16162 struct lpfc_vport **vports;
16163 struct lpfc_vport *vport = NULL;
16164 int i;
James Smart939723a2012-05-09 21:19:03 -040016165
James Smartbf086112011-08-21 21:48:13 -040016166 if (did == Fabric_DID)
16167 return phba->pport;
James Smart939723a2012-05-09 21:19:03 -040016168 if ((phba->pport->fc_flag & FC_PT2PT) &&
16169 !(phba->link_state == LPFC_HBA_READY))
16170 return phba->pport;
16171
James Smart4f774512009-05-22 14:52:35 -040016172 vports = lpfc_create_vport_work_array(phba);
James Smart895427b2017-02-12 13:52:30 -080016173 if (vports != NULL) {
James Smart4f774512009-05-22 14:52:35 -040016174 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
16175 if (phba->fcf.fcfi == fcfi &&
16176 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
16177 vports[i]->fc_myDID == did) {
16178 vport = vports[i];
16179 break;
16180 }
16181 }
James Smart895427b2017-02-12 13:52:30 -080016182 }
James Smart4f774512009-05-22 14:52:35 -040016183 lpfc_destroy_vport_work_array(phba, vports);
16184 return vport;
16185}
16186
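/*
 * Illustrative sketch only: how an unsolicited-receive path might gate
 * further processing on lpfc_fc_frame_check() before resolving the
 * destination vport with lpfc_fc_frame_to_vport().  This helper and its
 * fcfi/did arguments are hypothetical placeholders, not the actual lpfc
 * receive path.
 */
#if 0
static struct lpfc_vport *example_frame_filter(struct lpfc_hba *phba,
					       struct fc_frame_header *fc_hdr,
					       uint16_t fcfi, uint32_t did)
{
	if (lpfc_fc_frame_check(phba, fc_hdr))
		return NULL;	/* frame was dropped by the header check */
	return lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
}
#endif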
16187/**
James Smart45ed1192009-10-02 15:17:02 -040016188 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
16189 * @vport: The vport to work on.
16190 *
16191 * This function updates the receive sequence time stamp for this vport. The
16192 * receive sequence time stamp indicates the time that the last frame of the
16193 * sequence that has been idle for the longest amount of time was received.
16194 * The driver uses this time stamp to determine if any received sequences have
16195 * timed out.
16196 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040016197static void
James Smart45ed1192009-10-02 15:17:02 -040016198lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
16199{
16200 struct lpfc_dmabuf *h_buf;
16201 struct hbq_dmabuf *dmabuf = NULL;
16202
16203 /* get the oldest sequence on the rcv list */
16204 h_buf = list_get_first(&vport->rcv_buffer_list,
16205 struct lpfc_dmabuf, list);
16206 if (!h_buf)
16207 return;
16208 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16209 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
16210}
16211
16212/**
16213 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
16214 * @vport: The vport that the received sequences were sent to.
16215 *
16216 * This function cleans up all outstanding received sequences. This is called
16217 * by the driver when a link event or user action invalidates all the received
16218 * sequences.
16219 **/
16220void
16221lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
16222{
16223 struct lpfc_dmabuf *h_buf, *hnext;
16224 struct lpfc_dmabuf *d_buf, *dnext;
16225 struct hbq_dmabuf *dmabuf = NULL;
16226
16227 /* start with the oldest sequence on the rcv list */
16228 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16229 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16230 list_del_init(&dmabuf->hbuf.list);
16231 list_for_each_entry_safe(d_buf, dnext,
16232 &dmabuf->dbuf.list, list) {
16233 list_del_init(&d_buf->list);
16234 lpfc_in_buf_free(vport->phba, d_buf);
16235 }
16236 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16237 }
16238}
16239
16240/**
16241 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
16242 * @vport: The vport that the received sequences were sent to.
16243 *
16244 * This function determines whether any received sequences have timed out by
16245 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
16246 * indicates that there is at least one timed out sequence this routine will
16247 * go through the received sequences one at a time from most inactive to most
16248 * active to determine which ones need to be cleaned up. Once it has determined
16249 * that a sequence needs to be cleaned up it will simply free up the resources
16250 * without sending an abort.
16251 **/
16252void
16253lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
16254{
16255 struct lpfc_dmabuf *h_buf, *hnext;
16256 struct lpfc_dmabuf *d_buf, *dnext;
16257 struct hbq_dmabuf *dmabuf = NULL;
16258 unsigned long timeout;
16259 int abort_count = 0;
16260
16261 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16262 vport->rcv_buffer_time_stamp);
16263 if (list_empty(&vport->rcv_buffer_list) ||
16264 time_before(jiffies, timeout))
16265 return;
16266 /* start with the oldest sequence on the rcv list */
16267 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16268 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16269 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16270 dmabuf->time_stamp);
16271 if (time_before(jiffies, timeout))
16272 break;
16273 abort_count++;
16274 list_del_init(&dmabuf->hbuf.list);
16275 list_for_each_entry_safe(d_buf, dnext,
16276 &dmabuf->dbuf.list, list) {
16277 list_del_init(&d_buf->list);
16278 lpfc_in_buf_free(vport->phba, d_buf);
16279 }
16280 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16281 }
16282 if (abort_count)
16283 lpfc_update_rcv_time_stamp(vport);
16284}
16285
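/*
 * Illustrative sketch only: the sequence timeout test used above, expressed
 * as a helper.  A sequence is considered timed out once E_D_TOV milliseconds
 * (vport->phba->fc_edtov) have elapsed since its time stamp.
 */
#if 0
static bool example_seq_timed_out(struct lpfc_vport *vport,
				  unsigned long time_stamp)
{
	unsigned long timeout =
		msecs_to_jiffies(vport->phba->fc_edtov) + time_stamp;

	return time_after_eq(jiffies, timeout);
}
#endif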
16286/**
James Smart4f774512009-05-22 14:52:35 -040016287 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
16288 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
16289 *
16290 * This function searches through the existing incomplete sequences that have
16291 * been sent to this @vport. If the frame matches one of the incomplete
16292 * sequences then the dbuf in the @dmabuf is added to the list of frames that
16293 * make up that sequence. If no sequence is found that matches this frame then
16294 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
16295 * This function returns a pointer to the first dmabuf in the sequence list that
16296 * the frame was linked to.
16297 **/
16298static struct hbq_dmabuf *
16299lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16300{
16301 struct fc_frame_header *new_hdr;
16302 struct fc_frame_header *temp_hdr;
16303 struct lpfc_dmabuf *d_buf;
16304 struct lpfc_dmabuf *h_buf;
16305 struct hbq_dmabuf *seq_dmabuf = NULL;
16306 struct hbq_dmabuf *temp_dmabuf = NULL;
James Smart4360ca92015-12-16 18:12:04 -050016307 uint8_t found = 0;
James Smart4f774512009-05-22 14:52:35 -040016308
James Smart4d9ab992009-10-02 15:16:39 -040016309 INIT_LIST_HEAD(&dmabuf->dbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040016310 dmabuf->time_stamp = jiffies;
James Smart4f774512009-05-22 14:52:35 -040016311 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
James Smart4360ca92015-12-16 18:12:04 -050016312
James Smart4f774512009-05-22 14:52:35 -040016313 /* Use the hdr_buf to find the sequence that this frame belongs to */
16314 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16315 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16316 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16317 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16318 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16319 continue;
16320 /* found a pending sequence that matches this frame */
16321 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16322 break;
16323 }
16324 if (!seq_dmabuf) {
16325 /*
16326 * This indicates first frame received for this sequence.
16327 * Queue the buffer on the vport's rcv_buffer_list.
16328 */
16329 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
James Smart45ed1192009-10-02 15:17:02 -040016330 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040016331 return dmabuf;
16332 }
16333 temp_hdr = seq_dmabuf->hbuf.virt;
James Smarteeead812009-12-21 17:01:23 -050016334 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
16335 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
James Smart4d9ab992009-10-02 15:16:39 -040016336 list_del_init(&seq_dmabuf->hbuf.list);
16337 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16338 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040016339 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040016340 return dmabuf;
16341 }
James Smart45ed1192009-10-02 15:17:02 -040016342 /* move this sequence to the tail to indicate a young sequence */
16343 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
16344 seq_dmabuf->time_stamp = jiffies;
16345 lpfc_update_rcv_time_stamp(vport);
James Smarteeead812009-12-21 17:01:23 -050016346 if (list_empty(&seq_dmabuf->dbuf.list)) {
16347 temp_hdr = dmabuf->hbuf.virt;
16348 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16349 return seq_dmabuf;
16350 }
James Smart4f774512009-05-22 14:52:35 -040016351 /* find the correct place in the sequence to insert this frame */
James Smart4360ca92015-12-16 18:12:04 -050016352 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
16353 while (!found) {
James Smart4f774512009-05-22 14:52:35 -040016354 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16355 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
16356 /*
16357 * If the frame's sequence count is greater than the frame on
16358 * the list then insert the frame right after this frame
16359 */
James Smarteeead812009-12-21 17:01:23 -050016360 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
16361 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
James Smart4f774512009-05-22 14:52:35 -040016362 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
James Smart4360ca92015-12-16 18:12:04 -050016363 found = 1;
16364 break;
James Smart4f774512009-05-22 14:52:35 -040016365 }
James Smart4360ca92015-12-16 18:12:04 -050016366
16367 if (&d_buf->list == &seq_dmabuf->dbuf.list)
16368 break;
16369 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
James Smart4f774512009-05-22 14:52:35 -040016370 }
James Smart4360ca92015-12-16 18:12:04 -050016371
16372 if (found)
16373 return seq_dmabuf;
James Smart4f774512009-05-22 14:52:35 -040016374 return NULL;
16375}
16376
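/*
 * Illustrative sketch only: the matching rule used above to decide whether a
 * newly received frame belongs to a pending sequence -- same SEQ_ID, same
 * OX_ID and same source ID.
 */
#if 0
static bool example_same_sequence(struct fc_frame_header *a,
				  struct fc_frame_header *b)
{
	return (a->fh_seq_id == b->fh_seq_id) &&
	       (a->fh_ox_id == b->fh_ox_id) &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}
#endif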
16377/**
James Smart6669f9b2009-10-02 15:16:45 -040016378 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
16379 * @vport: pointer to a virtual port
16380 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16381 *
16382 * This function tries to abort the partially assembled sequence described
16383 * by the information from the basic abort @dmabuf. It checks to see whether such
16384 * a partially assembled sequence is held by the driver. If so, it shall free up all
16385 * the frames from the partially assembled sequence.
16386 *
16387 * Return
16388 * true -- if a matching partially assembled sequence is present and all
16389 * of its frames have been freed;
16390 * false -- if there is no matching partially assembled sequence present, so
16391 * nothing was aborted in the lower layer driver
16392 **/
16393static bool
16394lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
16395 struct hbq_dmabuf *dmabuf)
16396{
16397 struct fc_frame_header *new_hdr;
16398 struct fc_frame_header *temp_hdr;
16399 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
16400 struct hbq_dmabuf *seq_dmabuf = NULL;
16401
16402 /* Use the hdr_buf to find the sequence that matches this frame */
16403 INIT_LIST_HEAD(&dmabuf->dbuf.list);
16404 INIT_LIST_HEAD(&dmabuf->hbuf.list);
16405 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16406 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16407 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16408 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16409 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16410 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16411 continue;
16412 /* found a pending sequence that matches this frame */
16413 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16414 break;
16415 }
16416
16417 /* Free up all the frames from the partially assembled sequence */
16418 if (seq_dmabuf) {
16419 list_for_each_entry_safe(d_buf, n_buf,
16420 &seq_dmabuf->dbuf.list, list) {
16421 list_del_init(&d_buf->list);
16422 lpfc_in_buf_free(vport->phba, d_buf);
16423 }
16424 return true;
16425 }
16426 return false;
16427}
16428
16429/**
James Smart6dd9e312013-01-03 15:43:37 -050016430 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
16431 * @vport: pointer to a virtual port
16432 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16433 *
16434 * This function tries to abort the sequence assembled at the upper level
16435 * protocol, described by the information from the basic abort @dmabuf. It
16436 * checks to see whether such a pending context exists at the upper level
16437 * protocol. If so, it shall clean up the pending context.
16438 *
16439 * Return
16440 * true -- if there is a matching pending context for the sequence and it
16441 * is cleaned up at the ulp;
16442 * false -- if there is no matching pending context for the sequence present
16443 * at the ulp.
16444 **/
16445static bool
16446lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16447{
16448 struct lpfc_hba *phba = vport->phba;
16449 int handled;
16450
16451 /* Accepting abort at ulp with SLI4 only */
16452 if (phba->sli_rev < LPFC_SLI_REV4)
16453 return false;
16454
16455 /* Register all caring upper level protocols to attend abort */
16456 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
16457 if (handled)
16458 return true;
16459
16460 return false;
16461}
16462
16463/**
James Smart546fc852011-03-11 16:06:29 -050016464 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
James Smart6669f9b2009-10-02 15:16:45 -040016465 * @phba: Pointer to HBA context object.
16466 * @cmd_iocbq: pointer to the command iocbq structure.
16467 * @rsp_iocbq: pointer to the response iocbq structure.
16468 *
James Smart546fc852011-03-11 16:06:29 -050016469 * This function handles the sequence abort response iocb command complete
James Smart6669f9b2009-10-02 15:16:45 -040016470 * event. It properly releases the memory allocated to the sequence abort
16471 * accept iocb.
16472 **/
16473static void
James Smart546fc852011-03-11 16:06:29 -050016474lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
James Smart6669f9b2009-10-02 15:16:45 -040016475 struct lpfc_iocbq *cmd_iocbq,
16476 struct lpfc_iocbq *rsp_iocbq)
16477{
James Smart6dd9e312013-01-03 15:43:37 -050016478 struct lpfc_nodelist *ndlp;
16479
16480 if (cmd_iocbq) {
16481 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
16482 lpfc_nlp_put(ndlp);
16483 lpfc_nlp_not_used(ndlp);
James Smart6669f9b2009-10-02 15:16:45 -040016484 lpfc_sli_release_iocbq(phba, cmd_iocbq);
James Smart6dd9e312013-01-03 15:43:37 -050016485 }
James Smart6b5151f2012-01-18 16:24:06 -050016486
16487 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
16488 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
16489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16490 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
16491 rsp_iocbq->iocb.ulpStatus,
16492 rsp_iocbq->iocb.un.ulpWord[4]);
James Smart6669f9b2009-10-02 15:16:45 -040016493}
16494
16495/**
James Smart6d368e52011-05-24 11:44:12 -040016496 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
16497 * @phba: Pointer to HBA context object.
16498 * @xri: xri id in transaction.
16499 *
16500 * This function validates that the xri maps to the known range of XRIs
16501 * allocated and used by the driver.
16502 **/
James Smart7851fe22011-07-22 18:36:52 -040016503uint16_t
James Smart6d368e52011-05-24 11:44:12 -040016504lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
16505 uint16_t xri)
16506{
James Smarta2fc4aef2014-09-03 12:57:55 -040016507 uint16_t i;
James Smart6d368e52011-05-24 11:44:12 -040016508
16509 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
16510 if (xri == phba->sli4_hba.xri_ids[i])
16511 return i;
16512 }
16513 return NO_XRI;
16514}
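
/*
 * Editor's note: minimal usage sketch (not driver code), mirroring the
 * caller in lpfc_sli4_seq_abort_rsp() below. A physical XRI taken from a
 * received frame is mapped back to the driver's logical index before any
 * state is attached to it:
 *
 *	uint16_t lxri = lpfc_sli4_xri_inrange(phba, xri);
 *
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri, rxid, 0);
 */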
16515
James Smart6d368e52011-05-24 11:44:12 -040016516/**
James Smart546fc852011-03-11 16:06:29 -050016517 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040016518 * @phba: Pointer to HBA context object.
16519 * @fc_hdr: pointer to a FC frame header.
16520 *
James Smart546fc852011-03-11 16:06:29 -050016521 * This function sends a basic response to a previous unsol sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040016522 * event after aborting the sequence handling.
16523 **/
James Smart86c67372017-04-21 16:05:04 -070016524void
James Smart6dd9e312013-01-03 15:43:37 -050016525lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
16526 struct fc_frame_header *fc_hdr, bool aborted)
James Smart6669f9b2009-10-02 15:16:45 -040016527{
James Smart6dd9e312013-01-03 15:43:37 -050016528 struct lpfc_hba *phba = vport->phba;
James Smart6669f9b2009-10-02 15:16:45 -040016529 struct lpfc_iocbq *ctiocb = NULL;
16530 struct lpfc_nodelist *ndlp;
James Smartee0f4fe2012-05-09 21:19:14 -040016531 uint16_t oxid, rxid, xri, lxri;
James Smart5ffc2662009-11-18 15:39:44 -050016532 uint32_t sid, fctl;
James Smart6669f9b2009-10-02 15:16:45 -040016533 IOCB_t *icmd;
James Smart546fc852011-03-11 16:06:29 -050016534 int rc;
James Smart6669f9b2009-10-02 15:16:45 -040016535
16536 if (!lpfc_is_link_up(phba))
16537 return;
16538
16539 sid = sli4_sid_from_fc_hdr(fc_hdr);
16540 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
James Smart5ffc2662009-11-18 15:39:44 -050016541 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
James Smart6669f9b2009-10-02 15:16:45 -040016542
James Smart6dd9e312013-01-03 15:43:37 -050016543 ndlp = lpfc_findnode_did(vport, sid);
James Smart6669f9b2009-10-02 15:16:45 -040016544 if (!ndlp) {
James Smart9d3d3402017-04-21 16:05:00 -070016545 ndlp = lpfc_nlp_init(vport, sid);
James Smart6dd9e312013-01-03 15:43:37 -050016546 if (!ndlp) {
16547 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
16548 "1268 Failed to allocate ndlp for "
16549 "oxid:x%x SID:x%x\n", oxid, sid);
16550 return;
16551 }
James Smart6dd9e312013-01-03 15:43:37 -050016552 /* Put ndlp onto pport node list */
16553 lpfc_enqueue_node(vport, ndlp);
16554 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
16555 /* re-setup ndlp without removing from node list */
16556 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
16557 if (!ndlp) {
16558 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
16559 "3275 Failed to active ndlp found "
16560 "for oxid:x%x SID:x%x\n", oxid, sid);
16561 return;
16562 }
James Smart6669f9b2009-10-02 15:16:45 -040016563 }
16564
James Smart546fc852011-03-11 16:06:29 -050016565 /* Allocate buffer for rsp iocb */
James Smart6669f9b2009-10-02 15:16:45 -040016566 ctiocb = lpfc_sli_get_iocbq(phba);
16567 if (!ctiocb)
16568 return;
16569
James Smart5ffc2662009-11-18 15:39:44 -050016570 /* Extract the F_CTL field from FC_HDR */
16571 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
16572
James Smart6669f9b2009-10-02 15:16:45 -040016573 icmd = &ctiocb->iocb;
James Smart6669f9b2009-10-02 15:16:45 -040016574 icmd->un.xseq64.bdl.bdeSize = 0;
James Smart5ffc2662009-11-18 15:39:44 -050016575 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
James Smart6669f9b2009-10-02 15:16:45 -040016576 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
16577 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
16578 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
16579
16580 /* Fill in the rest of iocb fields */
16581 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
16582 icmd->ulpBdeCount = 0;
16583 icmd->ulpLe = 1;
16584 icmd->ulpClass = CLASS3;
James Smart6d368e52011-05-24 11:44:12 -040016585 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
James Smart6dd9e312013-01-03 15:43:37 -050016586 ctiocb->context1 = lpfc_nlp_get(ndlp);
James Smart6669f9b2009-10-02 15:16:45 -040016587
James Smart6669f9b2009-10-02 15:16:45 -040016588 ctiocb->iocb_cmpl = NULL;
16589 ctiocb->vport = phba->pport;
James Smart546fc852011-03-11 16:06:29 -050016590 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
James Smart6d368e52011-05-24 11:44:12 -040016591 ctiocb->sli4_lxritag = NO_XRI;
James Smart546fc852011-03-11 16:06:29 -050016592 ctiocb->sli4_xritag = NO_XRI;
16593
James Smartee0f4fe2012-05-09 21:19:14 -040016594 if (fctl & FC_FC_EX_CTX)
16595 /* Exchange responder sent the abort so we
16596 * own the oxid.
16597 */
16598 xri = oxid;
16599 else
16600 xri = rxid;
16601 lxri = lpfc_sli4_xri_inrange(phba, xri);
16602 if (lxri != NO_XRI)
16603 lpfc_set_rrq_active(phba, ndlp, lxri,
16604 (xri == oxid) ? rxid : oxid, 0);
James Smart6dd9e312013-01-03 15:43:37 -050016605 /* For BA_ABTS from exchange responder, if the logical xri with
16606 * the oxid maps to the FCP XRI range, the port no longer has
16607 * that exchange context, send a BLS_RJT. Override the IOCB for
16608 * a BA_RJT.
James Smart546fc852011-03-11 16:06:29 -050016609 */
James Smart6dd9e312013-01-03 15:43:37 -050016610 if ((fctl & FC_FC_EX_CTX) &&
James Smart895427b2017-02-12 13:52:30 -080016611 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
James Smart6dd9e312013-01-03 15:43:37 -050016612 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
16613 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
16614 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
16615 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
16616 }
16617
16618 /* If BA_ABTS failed to abort a partially assembled receive sequence,
16619 * the driver no longer has that exchange, send a BLS_RJT. Override
16620 * the IOCB for a BA_RJT.
16621 */
16622 if (aborted == false) {
James Smart546fc852011-03-11 16:06:29 -050016623 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
16624 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
16625 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
16626 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
16627 }
James Smart6669f9b2009-10-02 15:16:45 -040016628
James Smart5ffc2662009-11-18 15:39:44 -050016629 if (fctl & FC_FC_EX_CTX) {
16630 /* ABTS sent by responder to CT exchange, construction
16631 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
16632 * field and RX_ID from ABTS for RX_ID field.
16633 */
James Smart546fc852011-03-11 16:06:29 -050016634 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
James Smart5ffc2662009-11-18 15:39:44 -050016635 } else {
16636 /* ABTS sent by initiator to CT exchange, construction
16637 * of BA_ACC will need to allocate a new XRI as for the
James Smartf09c3ac2012-03-01 22:33:29 -050016638 * XRI_TAG field.
James Smart5ffc2662009-11-18 15:39:44 -050016639 */
James Smart546fc852011-03-11 16:06:29 -050016640 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
James Smart5ffc2662009-11-18 15:39:44 -050016641 }
James Smartf09c3ac2012-03-01 22:33:29 -050016642 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
James Smart546fc852011-03-11 16:06:29 -050016643 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
James Smart5ffc2662009-11-18 15:39:44 -050016644
James Smart546fc852011-03-11 16:06:29 -050016645 /* Xmit CT abts response on exchange <xid> */
James Smart6dd9e312013-01-03 15:43:37 -050016646 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
16647 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
16648 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
James Smart546fc852011-03-11 16:06:29 -050016649
16650 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
16651 if (rc == IOCB_ERROR) {
James Smart6dd9e312013-01-03 15:43:37 -050016652 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
16653 "2925 Failed to issue CT ABTS RSP x%x on "
16654 "xri x%x, Data x%x\n",
16655 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
16656 phba->link_state);
16657 lpfc_nlp_put(ndlp);
16658 ctiocb->context1 = NULL;
James Smart546fc852011-03-11 16:06:29 -050016659 lpfc_sli_release_iocbq(phba, ctiocb);
16660 }
James Smart6669f9b2009-10-02 15:16:45 -040016661}
16662
16663/**
16664 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
16665 * @vport: Pointer to the vport on which this sequence was received
16666 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16667 *
16668 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
16669 * receive sequence is only partially assembed by the driver, it shall abort
16670 * the partially assembled frames for the sequence. Otherwise, if the
16671 * unsolicited receive sequence has been completely assembled and passed to
16672 * the Upper Layer Protocol (ULP), it then marks the per-oxid status that the
16673 * unsolicited sequence has been aborted. After that, it will issue a basic
16674 * accept (BA_ACC) or basic reject (BA_RJT) in response to the abort.
16675 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040016676static void
James Smart6669f9b2009-10-02 15:16:45 -040016677lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
16678 struct hbq_dmabuf *dmabuf)
16679{
16680 struct lpfc_hba *phba = vport->phba;
16681 struct fc_frame_header fc_hdr;
James Smart5ffc2662009-11-18 15:39:44 -050016682 uint32_t fctl;
James Smart6dd9e312013-01-03 15:43:37 -050016683 bool aborted;
James Smart6669f9b2009-10-02 15:16:45 -040016684
James Smart6669f9b2009-10-02 15:16:45 -040016685 /* Make a copy of fc_hdr before the dmabuf being released */
16686 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
James Smart5ffc2662009-11-18 15:39:44 -050016687 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
James Smart6669f9b2009-10-02 15:16:45 -040016688
James Smart5ffc2662009-11-18 15:39:44 -050016689 if (fctl & FC_FC_EX_CTX) {
James Smart6dd9e312013-01-03 15:43:37 -050016690 /* ABTS by responder to exchange, no cleanup needed */
16691 aborted = true;
James Smart5ffc2662009-11-18 15:39:44 -050016692 } else {
James Smart6dd9e312013-01-03 15:43:37 -050016693 /* ABTS by initiator to exchange, need to do cleanup */
16694 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
16695 if (aborted == false)
16696 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
James Smart5ffc2662009-11-18 15:39:44 -050016697 }
James Smart6dd9e312013-01-03 15:43:37 -050016698 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16699
James Smart86c67372017-04-21 16:05:04 -070016700 if (phba->nvmet_support) {
16701 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
16702 return;
16703 }
16704
James Smart6dd9e312013-01-03 15:43:37 -050016705 /* Respond with BA_ACC or BA_RJT accordingly */
16706 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
James Smart6669f9b2009-10-02 15:16:45 -040016707}
16708
16709/**
James Smart4f774512009-05-22 14:52:35 -040016710 * lpfc_seq_complete - Indicates if a sequence is complete
16711 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16712 *
16713 * This function checks the sequence, starting with the frame described by
16714 * @dmabuf, to see if all the frames associated with this sequence are present.
16715 * The frames associated with this sequence are linked to the @dmabuf using the
16716 * dbuf list. This function looks for three major things: 1) that the first
16717 * frame has a sequence count of zero; 2) that there is a frame with the
16718 * last-frame-of-sequence bit set; and 3) that there are no holes in the
 * sequence count. The function will
16719 * return 1 when the sequence is complete, otherwise it will return 0.
16720 **/
16721static int
16722lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
16723{
16724 struct fc_frame_header *hdr;
16725 struct lpfc_dmabuf *d_buf;
16726 struct hbq_dmabuf *seq_dmabuf;
16727 uint32_t fctl;
16728 int seq_count = 0;
16729
16730 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16731	/* make sure the first frame of the sequence has a sequence count of zero */
16732 if (hdr->fh_seq_cnt != seq_count)
16733 return 0;
16734 fctl = (hdr->fh_f_ctl[0] << 16 |
16735 hdr->fh_f_ctl[1] << 8 |
16736 hdr->fh_f_ctl[2]);
16737 /* If last frame of sequence we can return success. */
16738 if (fctl & FC_FC_END_SEQ)
16739 return 1;
16740 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
16741 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16742 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16743 /* If there is a hole in the sequence count then fail. */
James Smarteeead812009-12-21 17:01:23 -050016744 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
James Smart4f774512009-05-22 14:52:35 -040016745 return 0;
16746 fctl = (hdr->fh_f_ctl[0] << 16 |
16747 hdr->fh_f_ctl[1] << 8 |
16748 hdr->fh_f_ctl[2]);
16749 /* If last frame of sequence we can return success. */
16750 if (fctl & FC_FC_END_SEQ)
16751 return 1;
16752 }
16753 return 0;
16754}
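
/*
 * Editor's note (hedged worked example): a sequence whose frames carry
 * fh_seq_cnt 0, 1, 2 with FC_FC_END_SEQ set in the F_CTL of the last frame
 * makes lpfc_seq_complete() return 1; the same frames with count 1 missing
 * (0, 2, END_SEQ) leave a hole and the routine returns 0.
 */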
16755
16756/**
16757 * lpfc_prep_seq - Prep sequence for ULP processing
16758 * @vport: Pointer to the vport on which this sequence was received
16759 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
16760 *
16761 * This function takes a sequence, described by a list of frames, and creates
16762 * a list of iocbq structures to describe the sequence. This iocbq list will be
16763 * used to issue to the generic unsolicited sequence handler. This routine
16764 * returns a pointer to the first iocbq in the list. If the function is unable
16765 * to allocate an iocbq then it throw out the received frames that were not
16766 * able to be described and return a pointer to the first iocbq. If unable to
16767 * allocate any iocbqs (including the first) this function will return NULL.
16768 **/
16769static struct lpfc_iocbq *
16770lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
16771{
James Smart7851fe22011-07-22 18:36:52 -040016772 struct hbq_dmabuf *hbq_buf;
James Smart4f774512009-05-22 14:52:35 -040016773 struct lpfc_dmabuf *d_buf, *n_buf;
16774 struct lpfc_iocbq *first_iocbq, *iocbq;
16775 struct fc_frame_header *fc_hdr;
16776 uint32_t sid;
James Smart7851fe22011-07-22 18:36:52 -040016777 uint32_t len, tot_len;
James Smarteeead812009-12-21 17:01:23 -050016778 struct ulp_bde64 *pbde;
James Smart4f774512009-05-22 14:52:35 -040016779
16780 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16781 /* remove from receive buffer list */
16782 list_del_init(&seq_dmabuf->hbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040016783 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040016784 /* get the Remote Port's SID */
James Smart6669f9b2009-10-02 15:16:45 -040016785 sid = sli4_sid_from_fc_hdr(fc_hdr);
James Smart7851fe22011-07-22 18:36:52 -040016786 tot_len = 0;
James Smart4f774512009-05-22 14:52:35 -040016787 /* Get an iocbq struct to fill in. */
16788 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
16789 if (first_iocbq) {
16790 /* Initialize the first IOCB. */
James Smart8fa38512009-07-19 10:01:03 -040016791 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
James Smart4f774512009-05-22 14:52:35 -040016792 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
James Smart895427b2017-02-12 13:52:30 -080016793 first_iocbq->vport = vport;
James Smart939723a2012-05-09 21:19:03 -040016794
16795 /* Check FC Header to see what TYPE of frame we are rcv'ing */
16796 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
16797 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
16798 first_iocbq->iocb.un.rcvels.parmRo =
16799 sli4_did_from_fc_hdr(fc_hdr);
16800 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
16801 } else
16802 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
James Smart7851fe22011-07-22 18:36:52 -040016803 first_iocbq->iocb.ulpContext = NO_XRI;
16804 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
16805 be16_to_cpu(fc_hdr->fh_ox_id);
16806 /* iocbq is prepped for internal consumption. Physical vpi. */
16807 first_iocbq->iocb.unsli3.rcvsli3.vpi =
16808 vport->phba->vpi_ids[vport->vpi];
James Smart4f774512009-05-22 14:52:35 -040016809 /* put the first buffer into the first IOCBq */
James Smart48a5a662013-07-15 18:32:28 -040016810 tot_len = bf_get(lpfc_rcqe_length,
16811 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
16812
James Smart4f774512009-05-22 14:52:35 -040016813 first_iocbq->context2 = &seq_dmabuf->dbuf;
16814 first_iocbq->context3 = NULL;
16815 first_iocbq->iocb.ulpBdeCount = 1;
James Smart48a5a662013-07-15 18:32:28 -040016816 if (tot_len > LPFC_DATA_BUF_SIZE)
16817 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
James Smart4f774512009-05-22 14:52:35 -040016818 LPFC_DATA_BUF_SIZE;
James Smart48a5a662013-07-15 18:32:28 -040016819 else
16820 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
16821
James Smart4f774512009-05-22 14:52:35 -040016822 first_iocbq->iocb.un.rcvels.remoteID = sid;
James Smart48a5a662013-07-15 18:32:28 -040016823
James Smart7851fe22011-07-22 18:36:52 -040016824 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
James Smart4f774512009-05-22 14:52:35 -040016825 }
16826 iocbq = first_iocbq;
16827 /*
16828 * Each IOCBq can have two Buffers assigned, so go through the list
16829 * of buffers for this sequence and save two buffers in each IOCBq
16830 */
16831 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
16832 if (!iocbq) {
16833 lpfc_in_buf_free(vport->phba, d_buf);
16834 continue;
16835 }
16836 if (!iocbq->context3) {
16837 iocbq->context3 = d_buf;
16838 iocbq->iocb.ulpBdeCount++;
James Smart7851fe22011-07-22 18:36:52 -040016839 /* We need to get the size out of the right CQE */
16840 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16841 len = bf_get(lpfc_rcqe_length,
16842 &hbq_buf->cq_event.cqe.rcqe_cmpl);
James Smart48a5a662013-07-15 18:32:28 -040016843 pbde = (struct ulp_bde64 *)
16844 &iocbq->iocb.unsli3.sli3Words[4];
16845 if (len > LPFC_DATA_BUF_SIZE)
16846 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
16847 else
16848 pbde->tus.f.bdeSize = len;
16849
James Smart7851fe22011-07-22 18:36:52 -040016850 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
16851 tot_len += len;
James Smart4f774512009-05-22 14:52:35 -040016852 } else {
16853 iocbq = lpfc_sli_get_iocbq(vport->phba);
16854 if (!iocbq) {
16855 if (first_iocbq) {
16856 first_iocbq->iocb.ulpStatus =
16857 IOSTAT_FCP_RSP_ERROR;
16858 first_iocbq->iocb.un.ulpWord[4] =
16859 IOERR_NO_RESOURCES;
16860 }
16861 lpfc_in_buf_free(vport->phba, d_buf);
16862 continue;
16863 }
James Smart7851fe22011-07-22 18:36:52 -040016864 /* We need to get the size out of the right CQE */
16865 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16866 len = bf_get(lpfc_rcqe_length,
16867 &hbq_buf->cq_event.cqe.rcqe_cmpl);
James Smart48a5a662013-07-15 18:32:28 -040016868 iocbq->context2 = d_buf;
16869 iocbq->context3 = NULL;
16870 iocbq->iocb.ulpBdeCount = 1;
16871 if (len > LPFC_DATA_BUF_SIZE)
16872 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16873 LPFC_DATA_BUF_SIZE;
16874 else
16875 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
16876
James Smart7851fe22011-07-22 18:36:52 -040016877 tot_len += len;
16878 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16879
James Smart4f774512009-05-22 14:52:35 -040016880 iocbq->iocb.un.rcvels.remoteID = sid;
16881 list_add_tail(&iocbq->list, &first_iocbq->list);
16882 }
16883 }
16884 return first_iocbq;
16885}
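
/*
 * Editor's note (hedged illustration): each iocbq built above carries at
 * most two buffers (context2 and context3), with every BDE size capped at
 * LPFC_DATA_BUF_SIZE. A five-buffer sequence therefore yields three iocbqs
 * chained on first_iocbq->list, ready to be handed to the unsolicited
 * sequence handler.
 */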
16886
James Smart6669f9b2009-10-02 15:16:45 -040016887static void
16888lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
16889 struct hbq_dmabuf *seq_dmabuf)
16890{
16891 struct fc_frame_header *fc_hdr;
16892 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
16893 struct lpfc_hba *phba = vport->phba;
16894
16895 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16896 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
16897 if (!iocbq) {
16898 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16899 "2707 Ring %d handler: Failed to allocate "
16900 "iocb Rctl x%x Type x%x received\n",
16901 LPFC_ELS_RING,
16902 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16903 return;
16904 }
16905 if (!lpfc_complete_unsol_iocb(phba,
James Smart895427b2017-02-12 13:52:30 -080016906 phba->sli4_hba.els_wq->pring,
James Smart6669f9b2009-10-02 15:16:45 -040016907 iocbq, fc_hdr->fh_r_ctl,
16908 fc_hdr->fh_type))
James Smart6d368e52011-05-24 11:44:12 -040016909 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart6669f9b2009-10-02 15:16:45 -040016910 "2540 Ring %d handler: unexpected Rctl "
16911 "x%x Type x%x received\n",
16912 LPFC_ELS_RING,
16913 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16914
16915 /* Free iocb created in lpfc_prep_seq */
16916 list_for_each_entry_safe(curr_iocb, next_iocb,
16917 &iocbq->list, list) {
16918 list_del_init(&curr_iocb->list);
16919 lpfc_sli_release_iocbq(phba, curr_iocb);
16920 }
16921 lpfc_sli_release_iocbq(phba, iocbq);
16922}
16923
James Smart4f774512009-05-22 14:52:35 -040016924/**
16925 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
16926 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the hbq_dmabuf describing the received frame.
16927 *
16928 * This function is called with no lock held. This function processes all
16929 * the received buffers and gives them to the upper layers when a received buffer
16930 * indicates that it is the final frame in the sequence. The interrupt
James Smart895427b2017-02-12 13:52:30 -080016931 * service routine processes received buffers in interrupt context.
James Smart4f774512009-05-22 14:52:35 -040016932 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
16933 * appropriate receive function when the final frame in a sequence is received.
16934 **/
James Smart4d9ab992009-10-02 15:16:39 -040016935void
16936lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
16937 struct hbq_dmabuf *dmabuf)
James Smart4f774512009-05-22 14:52:35 -040016938{
James Smart4d9ab992009-10-02 15:16:39 -040016939 struct hbq_dmabuf *seq_dmabuf;
James Smart4f774512009-05-22 14:52:35 -040016940 struct fc_frame_header *fc_hdr;
16941 struct lpfc_vport *vport;
16942 uint32_t fcfi;
James Smart939723a2012-05-09 21:19:03 -040016943 uint32_t did;
James Smart4f774512009-05-22 14:52:35 -040016944
James Smart4f774512009-05-22 14:52:35 -040016945 /* Process each received buffer */
James Smart4d9ab992009-10-02 15:16:39 -040016946 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
James Smart2ea259e2017-02-12 13:52:27 -080016947
James Smart4d9ab992009-10-02 15:16:39 -040016948 /* check to see if this a valid type of frame */
16949 if (lpfc_fc_frame_check(phba, fc_hdr)) {
16950 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16951 return;
16952 }
James Smart2ea259e2017-02-12 13:52:27 -080016953
James Smart7851fe22011-07-22 18:36:52 -040016954 if ((bf_get(lpfc_cqe_code,
16955 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
16956 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
16957 &dmabuf->cq_event.cqe.rcqe_cmpl);
16958 else
16959 fcfi = bf_get(lpfc_rcqe_fcf_id,
16960 &dmabuf->cq_event.cqe.rcqe_cmpl);
James Smart939723a2012-05-09 21:19:03 -040016961
James Smart895427b2017-02-12 13:52:30 -080016962 /* d_id this frame is directed to */
16963 did = sli4_did_from_fc_hdr(fc_hdr);
16964
16965 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
James Smart939723a2012-05-09 21:19:03 -040016966 if (!vport) {
James Smart4d9ab992009-10-02 15:16:39 -040016967 /* throw out the frame */
16968 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16969 return;
16970 }
James Smart939723a2012-05-09 21:19:03 -040016971
James Smart939723a2012-05-09 21:19:03 -040016972 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
16973 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
16974 (did != Fabric_DID)) {
16975 /*
16976 * Throw out the frame if we are not pt2pt.
16977 * The pt2pt protocol allows for discovery frames
16978 * to be received without a registered VPI.
16979 */
16980 if (!(vport->fc_flag & FC_PT2PT) ||
16981 (phba->link_state == LPFC_HBA_READY)) {
16982 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16983 return;
16984 }
16985 }
16986
James Smart6669f9b2009-10-02 15:16:45 -040016987 /* Handle the basic abort sequence (BA_ABTS) event */
16988 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
16989 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
16990 return;
16991 }
16992
James Smart4d9ab992009-10-02 15:16:39 -040016993 /* Link this frame */
16994 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
16995 if (!seq_dmabuf) {
16996 /* unable to add frame to vport - throw it out */
16997 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16998 return;
16999 }
17000 /* If not last frame in sequence continue processing frames. */
James Smartdef9c7a2009-12-21 17:02:28 -050017001 if (!lpfc_seq_complete(seq_dmabuf))
James Smart4d9ab992009-10-02 15:16:39 -040017002 return;
James Smartdef9c7a2009-12-21 17:02:28 -050017003
James Smart6669f9b2009-10-02 15:16:45 -040017004 /* Send the complete sequence to the upper layer protocol */
17005 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
James Smart4f774512009-05-22 14:52:35 -040017006}
James Smart6fb120a2009-05-22 14:52:59 -040017007
17008/**
17009 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17010 * @phba: pointer to lpfc hba data structure.
17011 *
17012 * This routine is invoked to post rpi header templates to the
17013 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040017014 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17015 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040017016 *
17017 * This routine does not require any locks. Its usage is expected
17018 * to be driver load or reset recovery when the driver is
17019 * sequential.
17020 *
17021 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020017022 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040017023 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040017024 * When this error occurs, the driver is not guaranteed
17025 * to have any rpi regions posted to the device and
17026 * must either attempt to repost the regions or take a
17027 * fatal error.
17028 **/
17029int
17030lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
17031{
17032 struct lpfc_rpi_hdr *rpi_page;
17033 uint32_t rc = 0;
James Smart6d368e52011-05-24 11:44:12 -040017034 uint16_t lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040017035
James Smart6d368e52011-05-24 11:44:12 -040017036 /* SLI4 ports that support extents do not require RPI headers. */
17037 if (!phba->sli4_hba.rpi_hdrs_in_use)
17038 goto exit;
17039 if (phba->sli4_hba.extents_in_use)
17040 return -EIO;
17041
James Smart6fb120a2009-05-22 14:52:59 -040017042 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
James Smart6d368e52011-05-24 11:44:12 -040017043 /*
17044 * Assign the rpi headers a physical rpi only if the driver
17045 * has not initialized those resources. A port reset only
17046 * needs the headers posted.
17047 */
17048 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
17049 LPFC_RPI_RSRC_RDY)
17050 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17051
James Smart6fb120a2009-05-22 14:52:59 -040017052 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
17053 if (rc != MBX_SUCCESS) {
17054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17055 "2008 Error %d posting all rpi "
17056 "headers\n", rc);
17057 rc = -EIO;
17058 break;
17059 }
17060 }
17061
James Smart6d368e52011-05-24 11:44:12 -040017062 exit:
17063 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
17064 LPFC_RPI_RSRC_RDY);
James Smart6fb120a2009-05-22 14:52:59 -040017065 return rc;
17066}
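
/*
 * Editor's note: hedged usage sketch (not driver code). A load or reset
 * recovery path would post all rpi headers and treat -EIO as leaving the
 * port without usable rpi regions:
 *
 *	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
 *	if (rc == -EIO)
 *		(repost the regions or escalate to a fatal error)
 */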
17067
17068/**
17069 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
17070 * @phba: pointer to lpfc hba data structure.
17071 * @rpi_page: pointer to the rpi memory region.
17072 *
17073 * This routine is invoked to post a single rpi header to the
17074 * HBA consistent with the SLI-4 interface spec. This memory region
17075 * maps up to 64 rpi context regions.
17076 *
17077 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020017078 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040017079 * -ENOMEM - No available memory
17080 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040017081 **/
17082int
17083lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17084{
17085 LPFC_MBOXQ_t *mboxq;
17086 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
17087 uint32_t rc = 0;
James Smart6fb120a2009-05-22 14:52:59 -040017088 uint32_t shdr_status, shdr_add_status;
17089 union lpfc_sli4_cfg_shdr *shdr;
17090
James Smart6d368e52011-05-24 11:44:12 -040017091 /* SLI4 ports that support extents do not require RPI headers. */
17092 if (!phba->sli4_hba.rpi_hdrs_in_use)
17093 return rc;
17094 if (phba->sli4_hba.extents_in_use)
17095 return -EIO;
17096
James Smart6fb120a2009-05-22 14:52:59 -040017097 /* The port is notified of the header region via a mailbox command. */
17098 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17099 if (!mboxq) {
17100 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17101 "2001 Unable to allocate memory for issuing "
17102 "SLI_CONFIG_SPECIAL mailbox command\n");
17103 return -ENOMEM;
17104 }
17105
17106 /* Post all rpi memory regions to the port. */
17107 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
James Smart6fb120a2009-05-22 14:52:59 -040017108 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17109 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
17110 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
James Smartfedd3b72011-02-16 12:39:24 -050017111 sizeof(struct lpfc_sli4_cfg_mhdr),
17112 LPFC_SLI4_MBX_EMBED);
James Smart6d368e52011-05-24 11:44:12 -040017113
17114
17115 /* Post the physical rpi to the port for this rpi header. */
James Smart6fb120a2009-05-22 14:52:59 -040017116 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
17117 rpi_page->start_rpi);
James Smart6d368e52011-05-24 11:44:12 -040017118 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
17119 hdr_tmpl, rpi_page->page_count);
17120
James Smart6fb120a2009-05-22 14:52:59 -040017121 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
17122 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
James Smartf1126682009-06-10 17:22:44 -040017123 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
James Smart6fb120a2009-05-22 14:52:59 -040017124 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
17125 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17126 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17127 if (rc != MBX_TIMEOUT)
17128 mempool_free(mboxq, phba->mbox_mem_pool);
17129 if (shdr_status || shdr_add_status || rc) {
17130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17131 "2514 POST_RPI_HDR mailbox failed with "
17132 "status x%x add_status x%x, mbx status x%x\n",
17133 shdr_status, shdr_add_status, rc);
17134 rc = -ENXIO;
17135 }
17136 return rc;
17137}
17138
17139/**
17140 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
17141 * @phba: pointer to lpfc hba data structure.
17142 *
17143 * This routine is invoked to post rpi header templates to the
17144 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040017145 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17146 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040017147 *
17148 * Returns
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020017149 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
James Smart6fb120a2009-05-22 14:52:59 -040017150 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
17151 **/
17152int
17153lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
17154{
James Smart6d368e52011-05-24 11:44:12 -040017155 unsigned long rpi;
17156 uint16_t max_rpi, rpi_limit;
17157 uint16_t rpi_remaining, lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040017158 struct lpfc_rpi_hdr *rpi_hdr;
James Smart4902b382013-10-10 12:20:35 -040017159 unsigned long iflag;
James Smart6fb120a2009-05-22 14:52:59 -040017160
James Smart6fb120a2009-05-22 14:52:59 -040017161 /*
James Smart6d368e52011-05-24 11:44:12 -040017162 * Fetch the next logical rpi. Because this index is logical,
17163 * the driver starts at 0 each time.
James Smart6fb120a2009-05-22 14:52:59 -040017164 */
James Smart4902b382013-10-10 12:20:35 -040017165 spin_lock_irqsave(&phba->hbalock, iflag);
James Smartbe6bb942015-04-07 15:07:22 -040017166 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
17167 rpi_limit = phba->sli4_hba.next_rpi;
17168
James Smart6d368e52011-05-24 11:44:12 -040017169 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
17170 if (rpi >= rpi_limit)
James Smart6fb120a2009-05-22 14:52:59 -040017171 rpi = LPFC_RPI_ALLOC_ERROR;
17172 else {
17173 set_bit(rpi, phba->sli4_hba.rpi_bmask);
17174 phba->sli4_hba.max_cfg_param.rpi_used++;
17175 phba->sli4_hba.rpi_count++;
17176 }
James Smartbe6bb942015-04-07 15:07:22 -040017177 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17178 "0001 rpi:%x max:%x lim:%x\n",
17179 (int) rpi, max_rpi, rpi_limit);
James Smart6fb120a2009-05-22 14:52:59 -040017180
17181 /*
17182 * Don't try to allocate more rpi header regions if the device limit
James Smart6d368e52011-05-24 11:44:12 -040017183 * has been exhausted.
James Smart6fb120a2009-05-22 14:52:59 -040017184 */
17185 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
17186 (phba->sli4_hba.rpi_count >= max_rpi)) {
James Smart4902b382013-10-10 12:20:35 -040017187 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6fb120a2009-05-22 14:52:59 -040017188 return rpi;
17189 }
17190
17191 /*
James Smart6d368e52011-05-24 11:44:12 -040017192 * RPI header postings are not required for SLI4 ports capable of
17193 * extents.
17194 */
17195 if (!phba->sli4_hba.rpi_hdrs_in_use) {
James Smart4902b382013-10-10 12:20:35 -040017196 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6d368e52011-05-24 11:44:12 -040017197 return rpi;
17198 }
17199
17200 /*
James Smart6fb120a2009-05-22 14:52:59 -040017201 * If the driver is running low on rpi resources, allocate another
17202 * page now. Note that the next_rpi value is used because
17203	 * it represents how many are actually in use whereas max_rpi notes
17204	 * the maximum number supported by the device.
17205 */
James Smart6d368e52011-05-24 11:44:12 -040017206 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
James Smart4902b382013-10-10 12:20:35 -040017207 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6fb120a2009-05-22 14:52:59 -040017208 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17209 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17210 if (!rpi_hdr) {
17211 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17212 "2002 Error Could not grow rpi "
17213 "count\n");
17214 } else {
James Smart6d368e52011-05-24 11:44:12 -040017215 lrpi = rpi_hdr->start_rpi;
17216 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
James Smart6fb120a2009-05-22 14:52:59 -040017217 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
17218 }
17219 }
17220
17221 return rpi;
17222}
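
/*
 * Editor's note: minimal usage sketch (assumption, not driver code). A
 * caller registering a remote port typically pairs the allocator with
 * lpfc_sli4_free_rpi() on the error or teardown path:
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		(fail the registration - no rpi available)
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */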
17223
17224/**
17225 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17226 * @phba: pointer to lpfc hba data structure.
17227 *
17228 * This routine is invoked to release an rpi to the pool of
17229 * available rpis maintained by the driver.
17230 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017231static void
James Smartd7c47992010-06-08 18:31:54 -040017232__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17233{
17234 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17235 phba->sli4_hba.rpi_count--;
17236 phba->sli4_hba.max_cfg_param.rpi_used--;
17237 }
17238}
17239
17240/**
17241 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17242 * @phba: pointer to lpfc hba data structure.
17243 *
17244 * This routine is invoked to release an rpi to the pool of
17245 * available rpis maintained by the driver.
17246 **/
17247void
James Smart6fb120a2009-05-22 14:52:59 -040017248lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17249{
17250 spin_lock_irq(&phba->hbalock);
James Smartd7c47992010-06-08 18:31:54 -040017251 __lpfc_sli4_free_rpi(phba, rpi);
James Smart6fb120a2009-05-22 14:52:59 -040017252 spin_unlock_irq(&phba->hbalock);
17253}
17254
17255/**
17256 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17257 * @phba: pointer to lpfc hba data structure.
17258 *
17259 * This routine is invoked to remove the memory region that
17260 * provided rpi via a bitmask.
17261 **/
17262void
17263lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17264{
17265 kfree(phba->sli4_hba.rpi_bmask);
James Smart6d368e52011-05-24 11:44:12 -040017266 kfree(phba->sli4_hba.rpi_ids);
17267 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
James Smart6fb120a2009-05-22 14:52:59 -040017268}
17269
17270/**
17271 * lpfc_sli4_resume_rpi - Resume an rpi for a remote node
17272 * @ndlp: pointer to the remote node whose rpi is to be resumed.
 * @cmpl: optional mailbox completion handler (NULL selects the default).
 * @arg: context argument passed back to the completion handler.
17273 *
17274 * This routine is invoked to issue a RESUME_RPI mailbox command to the
17275 * port for the rpi associated with @ndlp.
17276 **/
17277int
James Smart6b5151f2012-01-18 16:24:06 -050017278lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17279 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
James Smart6fb120a2009-05-22 14:52:59 -040017280{
17281 LPFC_MBOXQ_t *mboxq;
17282 struct lpfc_hba *phba = ndlp->phba;
17283 int rc;
17284
17285 /* The port is notified of the header region via a mailbox command. */
17286 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17287 if (!mboxq)
17288 return -ENOMEM;
17289
17290 /* Post all rpi memory regions to the port. */
17291 lpfc_resume_rpi(mboxq, ndlp);
James Smart6b5151f2012-01-18 16:24:06 -050017292 if (cmpl) {
17293 mboxq->mbox_cmpl = cmpl;
17294 mboxq->context1 = arg;
17295 mboxq->context2 = ndlp;
James Smart72859902012-01-18 16:25:38 -050017296 } else
17297 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart6b5151f2012-01-18 16:24:06 -050017298 mboxq->vport = ndlp->vport;
James Smart6fb120a2009-05-22 14:52:59 -040017299 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17300 if (rc == MBX_NOT_FINISHED) {
17301 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17302 "2010 Resume RPI Mailbox failed "
17303 "status %d, mbxStatus x%x\n", rc,
17304 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17305 mempool_free(mboxq, phba->mbox_mem_pool);
17306 return -EIO;
17307 }
17308 return 0;
17309}
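
/*
 * Editor's note: hedged usage sketch. A caller may pass a mailbox
 * completion handler plus context argument, or NULL for the default
 * completion:
 *
 *	rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
 *	if (rc)
 *		(mailbox could not be allocated or issued: -ENOMEM / -EIO)
 */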
17310
17311/**
17312 * lpfc_sli4_init_vpi - Initialize a vpi with the port
James Smart76a95d72010-11-20 23:11:48 -050017313 * @vport: Pointer to the vport for which the vpi is being initialized
James Smart6fb120a2009-05-22 14:52:59 -040017314 *
James Smart76a95d72010-11-20 23:11:48 -050017315 * This routine is invoked to activate a vpi with the port.
James Smart6fb120a2009-05-22 14:52:59 -040017316 *
17317 * Returns:
17318 * 0 success
17319 * -Evalue otherwise
17320 **/
17321int
James Smart76a95d72010-11-20 23:11:48 -050017322lpfc_sli4_init_vpi(struct lpfc_vport *vport)
James Smart6fb120a2009-05-22 14:52:59 -040017323{
17324 LPFC_MBOXQ_t *mboxq;
17325 int rc = 0;
James Smart6a9c52c2009-10-02 15:16:51 -040017326 int retval = MBX_SUCCESS;
James Smart6fb120a2009-05-22 14:52:59 -040017327 uint32_t mbox_tmo;
James Smart76a95d72010-11-20 23:11:48 -050017328 struct lpfc_hba *phba = vport->phba;
James Smart6fb120a2009-05-22 14:52:59 -040017329 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17330 if (!mboxq)
17331 return -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -050017332 lpfc_init_vpi(phba, mboxq, vport->vpi);
James Smarta183a152011-10-10 21:32:43 -040017333 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
James Smart6fb120a2009-05-22 14:52:59 -040017334 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
James Smart6fb120a2009-05-22 14:52:59 -040017335 if (rc != MBX_SUCCESS) {
James Smart76a95d72010-11-20 23:11:48 -050017336 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
James Smart6fb120a2009-05-22 14:52:59 -040017337 "2022 INIT VPI Mailbox failed "
17338 "status %d, mbxStatus x%x\n", rc,
17339 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
James Smart6a9c52c2009-10-02 15:16:51 -040017340 retval = -EIO;
James Smart6fb120a2009-05-22 14:52:59 -040017341 }
James Smart6a9c52c2009-10-02 15:16:51 -040017342 if (rc != MBX_TIMEOUT)
James Smart76a95d72010-11-20 23:11:48 -050017343 mempool_free(mboxq, vport->phba->mbox_mem_pool);
James Smart6a9c52c2009-10-02 15:16:51 -040017344
17345 return retval;
James Smart6fb120a2009-05-22 14:52:59 -040017346}
17347
17348/**
17349 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
17350 * @phba: pointer to lpfc hba data structure.
17351 * @mboxq: Pointer to mailbox object.
17352 *
17353 * This routine is invoked to manually add a single FCF record. The caller
17354 * must pass a completely initialized FCF_Record. This routine takes
17355 * care of the nonembedded mailbox operations.
17356 **/
17357static void
17358lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
17359{
17360 void *virt_addr;
17361 union lpfc_sli4_cfg_shdr *shdr;
17362 uint32_t shdr_status, shdr_add_status;
17363
17364 virt_addr = mboxq->sge_array->addr[0];
17365 /* The IOCTL status is embedded in the mailbox subheader. */
17366 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
17367 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17368 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17369
17370 if ((shdr_status || shdr_add_status) &&
17371 (shdr_status != STATUS_FCF_IN_USE))
17372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17373 "2558 ADD_FCF_RECORD mailbox failed with "
17374 "status x%x add_status x%x\n",
17375 shdr_status, shdr_add_status);
17376
17377 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17378}
17379
17380/**
17381 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
17382 * @phba: pointer to lpfc hba data structure.
17383 * @fcf_record: pointer to the initialized fcf record to add.
17384 *
17385 * This routine is invoked to manually add a single FCF record. The caller
17386 * must pass a completely initialized FCF_Record. This routine takes
17387 * care of the nonembedded mailbox operations.
17388 **/
17389int
17390lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
17391{
17392 int rc = 0;
17393 LPFC_MBOXQ_t *mboxq;
17394 uint8_t *bytep;
17395 void *virt_addr;
James Smart6fb120a2009-05-22 14:52:59 -040017396 struct lpfc_mbx_sge sge;
17397 uint32_t alloc_len, req_len;
17398 uint32_t fcfindex;
17399
17400 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17401 if (!mboxq) {
17402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17403 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
17404 return -ENOMEM;
17405 }
17406
17407 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
17408 sizeof(uint32_t);
17409
17410 /* Allocate DMA memory and set up the non-embedded mailbox command */
17411 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17412 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
17413 req_len, LPFC_SLI4_MBX_NEMBED);
17414 if (alloc_len < req_len) {
17415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17416 "2523 Allocated DMA memory size (x%x) is "
17417 "less than the requested DMA memory "
17418 "size (x%x)\n", alloc_len, req_len);
17419 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17420 return -ENOMEM;
17421 }
17422
17423 /*
17424 * Get the first SGE entry from the non-embedded DMA memory. This
17425 * routine only uses a single SGE.
17426 */
17427 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
James Smart6fb120a2009-05-22 14:52:59 -040017428 virt_addr = mboxq->sge_array->addr[0];
17429 /*
17430 * Configure the FCF record for FCFI 0. This is the driver's
17431 * hardcoded default and gets used in nonFIP mode.
17432 */
17433 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
17434 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
17435 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
17436
17437 /*
17438 * Copy the fcf_index and the FCF Record Data. The data starts after
17439 * the FCoE header plus word10. The data copy needs to be endian
17440 * correct.
17441 */
17442 bytep += sizeof(uint32_t);
17443 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
17444 mboxq->vport = phba->pport;
17445 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
17446 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17447 if (rc == MBX_NOT_FINISHED) {
17448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17449 "2515 ADD_FCF_RECORD mailbox failed with "
17450 "status 0x%x\n", rc);
17451 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17452 rc = -EIO;
17453 } else
17454 rc = 0;
17455
17456 return rc;
17457}
17458
17459/**
17460 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
17461 * @phba: pointer to lpfc hba data structure.
17462 * @fcf_record: pointer to the fcf record to write the default data.
17463 * @fcf_index: FCF table entry index.
17464 *
17465 * This routine is invoked to build the driver's default FCF record. The
17466 * values used are hardcoded. This routine handles memory initialization.
17467 *
17468 **/
17469void
17470lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
17471 struct fcf_record *fcf_record,
17472 uint16_t fcf_index)
17473{
17474 memset(fcf_record, 0, sizeof(struct fcf_record));
17475 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
17476 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
17477 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
17478 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
17479 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
17480 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
17481 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
17482 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
17483 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
17484 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
17485 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
17486 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
17487 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
James Smart0c287582009-06-10 17:22:56 -040017488 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
James Smart6fb120a2009-05-22 14:52:59 -040017489 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
17490 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
17491 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
17492 /* Set the VLAN bit map */
17493 if (phba->valid_vlan) {
17494 fcf_record->vlan_bitmap[phba->vlan_id / 8]
17495 = 1 << (phba->vlan_id % 8);
17496 }
17497}
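
/*
 * Editor's note: hedged usage sketch pairing the two FCF helpers above.
 * In non-FIP mode, a driver path could install the hardcoded default
 * record at FCF index 0 (allocation of fcf_record left to the caller):
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 0);
 *	if (lpfc_sli4_add_fcf_record(phba, fcf_record))
 *		(the ADD_FCF mailbox could not be issued)
 */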
17498
17499/**
James Smart0c9ab6f2010-02-26 14:15:57 -050017500 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
James Smart6fb120a2009-05-22 14:52:59 -040017501 * @phba: pointer to lpfc hba data structure.
17502 * @fcf_index: FCF table entry offset.
17503 *
James Smart0c9ab6f2010-02-26 14:15:57 -050017504 * This routine is invoked to scan the entire FCF table by reading FCF
17505 * record and processing it one at a time starting from the @fcf_index
17506 * for initial FCF discovery or fast FCF failover rediscovery.
17507 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030017508 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050017509 * otherwise.
James Smart6fb120a2009-05-22 14:52:59 -040017510 **/
17511int
James Smart0c9ab6f2010-02-26 14:15:57 -050017512lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
James Smart6fb120a2009-05-22 14:52:59 -040017513{
17514 int rc = 0, error;
17515 LPFC_MBOXQ_t *mboxq;
James Smart6fb120a2009-05-22 14:52:59 -040017516
James Smart32b97932009-07-19 10:01:21 -040017517 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
James Smart80c17842012-03-01 22:35:45 -050017518 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
James Smart6fb120a2009-05-22 14:52:59 -040017519 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17520 if (!mboxq) {
17521 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17522 "2000 Failed to allocate mbox for "
17523 "READ_FCF cmd\n");
James Smart4d9ab992009-10-02 15:16:39 -040017524 error = -ENOMEM;
James Smart0c9ab6f2010-02-26 14:15:57 -050017525 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040017526 }
James Smartecfd03c2010-02-12 14:41:27 -050017527 /* Construct the read FCF record mailbox command */
James Smart0c9ab6f2010-02-26 14:15:57 -050017528 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
James Smartecfd03c2010-02-12 14:41:27 -050017529 if (rc) {
17530 error = -EINVAL;
James Smart0c9ab6f2010-02-26 14:15:57 -050017531 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040017532 }
James Smartecfd03c2010-02-12 14:41:27 -050017533 /* Issue the mailbox command asynchronously */
James Smart6fb120a2009-05-22 14:52:59 -040017534 mboxq->vport = phba->pport;
James Smart0c9ab6f2010-02-26 14:15:57 -050017535 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
James Smarta93ff372010-10-22 11:06:08 -040017536
17537 spin_lock_irq(&phba->hbalock);
17538 phba->hba_flag |= FCF_TS_INPROG;
17539 spin_unlock_irq(&phba->hbalock);
17540
James Smart6fb120a2009-05-22 14:52:59 -040017541 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
James Smartecfd03c2010-02-12 14:41:27 -050017542 if (rc == MBX_NOT_FINISHED)
James Smart6fb120a2009-05-22 14:52:59 -040017543 error = -EIO;
James Smartecfd03c2010-02-12 14:41:27 -050017544 else {
James Smart38b92ef2010-08-04 16:11:39 -040017545 /* Reset eligible FCF count for new scan */
17546 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
James Smart999d8132010-03-15 11:24:56 -040017547 phba->fcf.eligible_fcf_cnt = 0;
James Smart6fb120a2009-05-22 14:52:59 -040017548 error = 0;
James Smart32b97932009-07-19 10:01:21 -040017549 }
James Smart0c9ab6f2010-02-26 14:15:57 -050017550fail_fcf_scan:
James Smart4d9ab992009-10-02 15:16:39 -040017551 if (error) {
17552 if (mboxq)
17553 lpfc_sli4_mbox_cmd_free(phba, mboxq);
James Smarta93ff372010-10-22 11:06:08 -040017554 /* FCF scan failed, clear FCF_TS_INPROG flag */
James Smart4d9ab992009-10-02 15:16:39 -040017555 spin_lock_irq(&phba->hbalock);
James Smarta93ff372010-10-22 11:06:08 -040017556 phba->hba_flag &= ~FCF_TS_INPROG;
James Smart4d9ab992009-10-02 15:16:39 -040017557 spin_unlock_irq(&phba->hbalock);
17558 }
James Smart6fb120a2009-05-22 14:52:59 -040017559 return error;
17560}
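
/*
 * Editor's note: hedged usage sketch. Initial FCF discovery, or a fast
 * failover rediscovery, is typically started from the first table entry:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		(the scan could not be started; FCF_TS_INPROG is cleared)
 */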
James Smarta0c87cb2009-07-19 10:01:10 -040017561
17562/**
James Smarta93ff372010-10-22 11:06:08 -040017563 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
James Smart0c9ab6f2010-02-26 14:15:57 -050017564 * @phba: pointer to lpfc hba data structure.
17565 * @fcf_index: FCF table entry offset.
17566 *
17567 * This routine is invoked to read an FCF record indicated by @fcf_index
James Smarta93ff372010-10-22 11:06:08 -040017568 * and to use it for FLOGI roundrobin FCF failover.
James Smart0c9ab6f2010-02-26 14:15:57 -050017569 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030017570 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050017571 * otherwise.
17572 **/
17573int
17574lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17575{
17576 int rc = 0, error;
17577 LPFC_MBOXQ_t *mboxq;
17578
17579 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17580 if (!mboxq) {
17581 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17582 "2763 Failed to allocate mbox for "
17583 "READ_FCF cmd\n");
17584 error = -ENOMEM;
17585 goto fail_fcf_read;
17586 }
17587 /* Construct the read FCF record mailbox command */
17588 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17589 if (rc) {
17590 error = -EINVAL;
17591 goto fail_fcf_read;
17592 }
17593 /* Issue the mailbox command asynchronously */
17594 mboxq->vport = phba->pport;
17595 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
17596 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17597 if (rc == MBX_NOT_FINISHED)
17598 error = -EIO;
17599 else
17600 error = 0;
17601
17602fail_fcf_read:
17603 if (error && mboxq)
17604 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17605 return error;
17606}
17607
17608/**
17609 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
17610 * @phba: pointer to lpfc hba data structure.
17611 * @fcf_index: FCF table entry offset.
17612 *
17613 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
James Smart0c9ab6f2010-02-26 14:15:57 -050017615 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050017617 * otherwise.
17618 **/
17619int
17620lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17621{
17622 int rc = 0, error;
17623 LPFC_MBOXQ_t *mboxq;
17624
17625 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17626 if (!mboxq) {
17627 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17628 "2758 Failed to allocate mbox for "
17629 "READ_FCF cmd\n");
17630 error = -ENOMEM;
17631 goto fail_fcf_read;
17632 }
17633 /* Construct the read FCF record mailbox command */
17634 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17635 if (rc) {
17636 error = -EINVAL;
17637 goto fail_fcf_read;
17638 }
17639 /* Issue the mailbox command asynchronously */
17640 mboxq->vport = phba->pport;
17641 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
17642 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17643 if (rc == MBX_NOT_FINISHED)
17644 error = -EIO;
17645 else
17646 error = 0;
17647
17648fail_fcf_read:
17649 if (error && mboxq)
17650 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17651 return error;
17652}
17653
17654/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from the next priority group
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get routine
 * when the rr_bmask is empty. The FCF indices are put into the rr_bmask
 * based on their priority level, starting from the highest priority down
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list
 * for the next lowest priority group and repopulates the rr_bmask with only
 * those fcf_indexes.
 * Returns:
 * 1=success 0=failure
17666 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017667static int
James Smart7d791df2011-07-22 18:37:52 -040017668lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
17669{
17670 uint16_t next_fcf_pri;
17671 uint16_t last_index;
17672 struct lpfc_fcf_pri *fcf_pri;
17673 int rc;
17674 int ret = 0;
17675
17676 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
17677 LPFC_SLI4_FCF_TBL_INDX_MAX);
17678 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17679 "3060 Last IDX %d\n", last_index);
James Smart25626692013-03-01 16:36:54 -050017680
17681 /* Verify the priority list has 2 or more entries */
17682 spin_lock_irq(&phba->hbalock);
17683 if (list_empty(&phba->fcf.fcf_pri_list) ||
17684 list_is_singular(&phba->fcf.fcf_pri_list)) {
17685 spin_unlock_irq(&phba->hbalock);
James Smart7d791df2011-07-22 18:37:52 -040017686 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
17687 "3061 Last IDX %d\n", last_index);
17688 return 0; /* Empty rr list */
17689 }
James Smart25626692013-03-01 16:36:54 -050017690 spin_unlock_irq(&phba->hbalock);
17691
James Smart7d791df2011-07-22 18:37:52 -040017692 next_fcf_pri = 0;
17693 /*
17694 * Clear the rr_bmask and set all of the bits that are at this
17695 * priority.
17696 */
17697 memset(phba->fcf.fcf_rr_bmask, 0,
17698 sizeof(*phba->fcf.fcf_rr_bmask));
17699 spin_lock_irq(&phba->hbalock);
17700 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17701 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
17702 continue;
17703 /*
		 * The first priority that has not failed FLOGI
		 * will be the highest.
17706 */
17707 if (!next_fcf_pri)
17708 next_fcf_pri = fcf_pri->fcf_rec.priority;
17709 spin_unlock_irq(&phba->hbalock);
17710 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17711 rc = lpfc_sli4_fcf_rr_index_set(phba,
17712 fcf_pri->fcf_rec.fcf_index);
17713 if (rc)
17714 return 0;
17715 }
17716 spin_lock_irq(&phba->hbalock);
17717 }
17718 /*
	 * If next_fcf_pri was not set above and the list is not empty, then
	 * FLOGI has failed on all of them. So clear the FLOGI-failed flags
	 * and start over from the beginning.
James Smart7d791df2011-07-22 18:37:52 -040017722 */
17723 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
17724 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17725 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
17726 /*
			 * The first priority that has not failed FLOGI
			 * will be the highest.
17729 */
17730 if (!next_fcf_pri)
17731 next_fcf_pri = fcf_pri->fcf_rec.priority;
17732 spin_unlock_irq(&phba->hbalock);
17733 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17734 rc = lpfc_sli4_fcf_rr_index_set(phba,
17735 fcf_pri->fcf_rec.fcf_index);
17736 if (rc)
17737 return 0;
17738 }
17739 spin_lock_irq(&phba->hbalock);
17740 }
17741 } else
17742 ret = 1;
17743 spin_unlock_irq(&phba->hbalock);
17744
17745 return ret;
17746}
17747/**
James Smart0c9ab6f2010-02-26 14:15:57 -050017748 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
17749 * @phba: pointer to lpfc hba data structure.
17750 *
17751 * This routine is to get the next eligible FCF record index in a round
17752 * robin fashion. If the next eligible FCF record index equals to the
James Smarta93ff372010-10-22 11:06:08 -040017753 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
James Smart0c9ab6f2010-02-26 14:15:57 -050017754 * shall be returned, otherwise, the next eligible FCF record's index
17755 * shall be returned.
17756 **/
17757uint16_t
17758lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
17759{
17760 uint16_t next_fcf_index;
17761
James Smart421c6622013-01-03 15:44:16 -050017762initial_priority:
James Smart3804dc82010-07-14 15:31:37 -040017763 /* Search start from next bit of currently registered FCF index */
James Smart421c6622013-01-03 15:44:16 -050017764 next_fcf_index = phba->fcf.current_rec.fcf_indx;
17765
James Smart7d791df2011-07-22 18:37:52 -040017766next_priority:
James Smart421c6622013-01-03 15:44:16 -050017767 /* Determine the next fcf index to check */
17768 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
James Smart0c9ab6f2010-02-26 14:15:57 -050017769 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
17770 LPFC_SLI4_FCF_TBL_INDX_MAX,
James Smart3804dc82010-07-14 15:31:37 -040017771 next_fcf_index);
17772
James Smart0c9ab6f2010-02-26 14:15:57 -050017773 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
James Smart7d791df2011-07-22 18:37:52 -040017774 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17775 /*
17776 * If we have wrapped then we need to clear the bits that
17777 * have been tested so that we can detect when we should
17778 * change the priority level.
17779 */
James Smart0c9ab6f2010-02-26 14:15:57 -050017780 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
17781 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
James Smart7d791df2011-07-22 18:37:52 -040017782 }
17783
James Smart0c9ab6f2010-02-26 14:15:57 -050017784
James Smart3804dc82010-07-14 15:31:37 -040017785 /* Check roundrobin failover list empty condition */
James Smart7d791df2011-07-22 18:37:52 -040017786 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
17787 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
17788 /*
17789 * If next fcf index is not found check if there are lower
17790 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
17792 * at that level and continue the selection process.
17793 */
17794 if (lpfc_check_next_fcf_pri_level(phba))
James Smart421c6622013-01-03 15:44:16 -050017795 goto initial_priority;
James Smart3804dc82010-07-14 15:31:37 -040017796 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
17797 "2844 No roundrobin failover FCF available\n");
James Smart7d791df2011-07-22 18:37:52 -040017798 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
17799 return LPFC_FCOE_FCF_NEXT_NONE;
17800 else {
17801 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
17802 "3063 Only FCF available idx %d, flag %x\n",
17803 next_fcf_index,
17804 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
17805 return next_fcf_index;
17806 }
James Smart3804dc82010-07-14 15:31:37 -040017807 }
17808
James Smart7d791df2011-07-22 18:37:52 -040017809 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
17810 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
James Smartf5cb5302015-12-16 18:11:52 -050017811 LPFC_FCF_FLOGI_FAILED) {
17812 if (list_is_singular(&phba->fcf.fcf_pri_list))
17813 return LPFC_FCOE_FCF_NEXT_NONE;
17814
James Smart7d791df2011-07-22 18:37:52 -040017815 goto next_priority;
James Smartf5cb5302015-12-16 18:11:52 -050017816 }
James Smart7d791df2011-07-22 18:37:52 -040017817
James Smart3804dc82010-07-14 15:31:37 -040017818 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017819 "2845 Get next roundrobin failover FCF (x%x)\n",
17820 next_fcf_index);
17821
James Smart0c9ab6f2010-02-26 14:15:57 -050017822 return next_fcf_index;
17823}
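
/*
 * Illustrative sketch (not part of the driver): how a FLOGI failover path
 * might chain lpfc_sli4_fcf_rr_next_index_get() with
 * lpfc_sli4_fcf_rr_read_fcf_rec() above. Locking and most error handling
 * are elided, and the error codes returned by the caller are hypothetical.
 *
 *	uint16_t fcf_index;
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		return -ENOENT;
 *	if (lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index))
 *		return -EIO;
 *	return 0;
 */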
17824
17825/**
17826 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine sets the FCF record index into the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040017830 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050017831 * does not go beyond the range of the driver allocated bmask dimension
17832 * before setting the bit.
17833 *
 * Returns 0 if the index bit is successfully set; otherwise, it returns
17835 * -EINVAL.
17836 **/
17837int
17838lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
17839{
17840 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17841 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017842 "2610 FCF (x%x) reached driver's book "
17843 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050017844 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
17845 return -EINVAL;
17846 }
17847 /* Set the eligible FCF record index bmask */
17848 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
17849
James Smart3804dc82010-07-14 15:31:37 -040017850 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017851 "2790 Set FCF (x%x) to roundrobin FCF failover "
James Smart3804dc82010-07-14 15:31:37 -040017852 "bmask\n", fcf_index);
17853
James Smart0c9ab6f2010-02-26 14:15:57 -050017854 return 0;
17855}
17856
17857/**
James Smart3804dc82010-07-14 15:31:37 -040017858 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
17860 *
17861 * This routine clears the FCF record index from the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040017862 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050017863 * does not go beyond the range of the driver allocated bmask dimension
17864 * before clearing the bit.
17865 **/
17866void
17867lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
17868{
James Smart9a803a72013-09-06 12:17:56 -040017869 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
James Smart0c9ab6f2010-02-26 14:15:57 -050017870 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17871 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017872 "2762 FCF (x%x) reached driver's book "
17873 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050017874 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
17875 return;
17876 }
17877 /* Clear the eligible FCF record index bmask */
James Smart7d791df2011-07-22 18:37:52 -040017878 spin_lock_irq(&phba->hbalock);
James Smart9a803a72013-09-06 12:17:56 -040017879 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
17880 list) {
James Smart7d791df2011-07-22 18:37:52 -040017881 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
17882 list_del_init(&fcf_pri->list);
17883 break;
17884 }
17885 }
17886 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050017887 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
James Smart3804dc82010-07-14 15:31:37 -040017888
17889 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017890 "2791 Clear FCF (x%x) from roundrobin failover "
James Smart3804dc82010-07-14 15:31:37 -040017891 "bmask\n", fcf_index);
James Smart0c9ab6f2010-02-26 14:15:57 -050017892}
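
/*
 * Illustrative sketch (not part of the driver) of the bmask maintenance the
 * two routines above provide: an FCF index found eligible during a scan is
 * added with lpfc_sli4_fcf_rr_index_set(), and cleared again with
 * lpfc_sli4_fcf_rr_index_clear() once that FCF should no longer be offered
 * by the roundrobin search. The flogi_failed flag below is hypothetical.
 *
 *	if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
 *		return;
 *	...
 *	if (flogi_failed)
 *		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 */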
17893
17894/**
James Smartecfd03c2010-02-12 14:41:27 -050017895 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
17896 * @phba: pointer to lpfc hba data structure.
17897 *
17898 * This routine is the completion routine for the rediscover FCF table mailbox
17899 * command. If the mailbox command returned failure, it will try to stop the
17900 * FCF rediscover wait timer.
17901 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017902static void
James Smartecfd03c2010-02-12 14:41:27 -050017903lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
17904{
17905 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
17906 uint32_t shdr_status, shdr_add_status;
17907
17908 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
17909
17910 shdr_status = bf_get(lpfc_mbox_hdr_status,
17911 &redisc_fcf->header.cfg_shdr.response);
17912 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
17913 &redisc_fcf->header.cfg_shdr.response);
17914 if (shdr_status || shdr_add_status) {
James Smart0c9ab6f2010-02-26 14:15:57 -050017915 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smartecfd03c2010-02-12 14:41:27 -050017916 "2746 Requesting for FCF rediscovery failed "
17917 "status x%x add_status x%x\n",
17918 shdr_status, shdr_add_status);
James Smart0c9ab6f2010-02-26 14:15:57 -050017919 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
James Smartfc2b9892010-02-26 14:15:29 -050017920 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050017921 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
James Smartfc2b9892010-02-26 14:15:29 -050017922 spin_unlock_irq(&phba->hbalock);
17923 /*
17924 * CVL event triggered FCF rediscover request failed,
17925 * last resort to re-try current registered FCF entry.
17926 */
17927 lpfc_retry_pport_discovery(phba);
17928 } else {
17929 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050017930 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
James Smartfc2b9892010-02-26 14:15:29 -050017931 spin_unlock_irq(&phba->hbalock);
17932 /*
17933 * DEAD FCF event triggered FCF rediscover request
17934 * failed, last resort to fail over as a link down
17935 * to FCF registration.
17936 */
17937 lpfc_sli4_fcf_dead_failthrough(phba);
17938 }
James Smart0c9ab6f2010-02-26 14:15:57 -050017939 } else {
17940 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017941 "2775 Start FCF rediscover quiescent timer\n");
James Smartecfd03c2010-02-12 14:41:27 -050017942 /*
17943 * Start FCF rediscovery wait timer for pending FCF
17944 * before rescan FCF record table.
17945 */
17946 lpfc_fcf_redisc_wait_start_timer(phba);
James Smart0c9ab6f2010-02-26 14:15:57 -050017947 }
James Smartecfd03c2010-02-12 14:41:27 -050017948
17949 mempool_free(mbox, phba->mbox_mem_pool);
17950}
17951
17952/**
James Smart3804dc82010-07-14 15:31:37 -040017953 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
James Smartecfd03c2010-02-12 14:41:27 -050017954 * @phba: pointer to lpfc hba data structure.
17955 *
 * This routine is invoked to request rediscovery of the entire FCF table
17957 * by the port.
17958 **/
17959int
17960lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
17961{
17962 LPFC_MBOXQ_t *mbox;
17963 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
17964 int rc, length;
17965
James Smart0c9ab6f2010-02-26 14:15:57 -050017966 /* Cancel retry delay timers to all vports before FCF rediscover */
17967 lpfc_cancel_all_vport_retry_delay_timer(phba);
17968
James Smartecfd03c2010-02-12 14:41:27 -050017969 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17970 if (!mbox) {
17971 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17972 "2745 Failed to allocate mbox for "
17973 "requesting FCF rediscover.\n");
17974 return -ENOMEM;
17975 }
17976
17977 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
17978 sizeof(struct lpfc_sli4_cfg_mhdr));
17979 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17980 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
17981 length, LPFC_SLI4_MBX_EMBED);
17982
17983 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
17984 /* Set count to 0 for invalidating the entire FCF database */
17985 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
17986
17987 /* Issue the mailbox command asynchronously */
17988 mbox->vport = phba->pport;
17989 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
17990 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
17991
17992 if (rc == MBX_NOT_FINISHED) {
17993 mempool_free(mbox, phba->mbox_mem_pool);
17994 return -EIO;
17995 }
17996 return 0;
17997}
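
/*
 * Illustrative sketch (not part of the driver): requesting a full FCF table
 * rediscovery, for example after a DEAD FCF or CVL event. On success the
 * port reports completion through lpfc_mbx_cmpl_redisc_fcf_table() above,
 * which starts the quiescent wait timer before the table is rescanned.
 *
 *	int rc;
 *
 *	rc = lpfc_sli4_redisc_fcf_table(phba);
 *	if (rc)
 *		return rc;
 */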
17998
17999/**
James Smartfc2b9892010-02-26 14:15:29 -050018000 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18001 * @phba: pointer to lpfc hba data structure.
18002 *
18003 * This function is the failover routine as a last resort to the FCF DEAD
18004 * event when driver failed to perform fast FCF failover.
18005 **/
18006void
18007lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18008{
18009 uint32_t link_state;
18010
18011 /*
18012 * Last resort as FCF DEAD event failover will treat this as
18013 * a link down, but save the link state because we don't want
18014 * it to be changed to Link Down unless it is already down.
18015 */
18016 link_state = phba->link_state;
18017 lpfc_linkdown(phba);
18018 phba->link_state = link_state;
18019
18020 /* Unregister FCF if no devices connected to it */
18021 lpfc_unregister_unused_fcf(phba);
18022}
18023
18024/**
James Smart026abb82011-12-13 13:20:45 -050018025 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
James Smarta0c87cb2009-07-19 10:01:10 -040018026 * @phba: pointer to lpfc hba data structure.
James Smart026abb82011-12-13 13:20:45 -050018027 * @rgn23_data: pointer to configure region 23 data.
James Smarta0c87cb2009-07-19 10:01:10 -040018028 *
 * This function gets SLI3 port configure region 23 data through the memory dump
18030 * mailbox command. When it successfully retrieves data, the size of the data
18031 * will be returned, otherwise, 0 will be returned.
James Smarta0c87cb2009-07-19 10:01:10 -040018032 **/
James Smart026abb82011-12-13 13:20:45 -050018033static uint32_t
18034lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
James Smarta0c87cb2009-07-19 10:01:10 -040018035{
18036 LPFC_MBOXQ_t *pmb = NULL;
18037 MAILBOX_t *mb;
James Smart026abb82011-12-13 13:20:45 -050018038 uint32_t offset = 0;
James Smarta0c87cb2009-07-19 10:01:10 -040018039 int rc;
18040
James Smart026abb82011-12-13 13:20:45 -050018041 if (!rgn23_data)
18042 return 0;
18043
James Smarta0c87cb2009-07-19 10:01:10 -040018044 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18045 if (!pmb) {
18046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart026abb82011-12-13 13:20:45 -050018047 "2600 failed to allocate mailbox memory\n");
18048 return 0;
James Smarta0c87cb2009-07-19 10:01:10 -040018049 }
18050 mb = &pmb->u.mb;
18051
James Smarta0c87cb2009-07-19 10:01:10 -040018052 do {
18053 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
18054 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
18055
18056 if (rc != MBX_SUCCESS) {
18057 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart026abb82011-12-13 13:20:45 -050018058 "2601 failed to read config "
18059 "region 23, rc 0x%x Status 0x%x\n",
18060 rc, mb->mbxStatus);
James Smarta0c87cb2009-07-19 10:01:10 -040018061 mb->un.varDmp.word_cnt = 0;
18062 }
18063 /*
18064 * dump mem may return a zero when finished or we got a
18065 * mailbox error, either way we are done.
18066 */
18067 if (mb->un.varDmp.word_cnt == 0)
18068 break;
18069 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18070 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
18071
18072 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
James Smart026abb82011-12-13 13:20:45 -050018073 rgn23_data + offset,
18074 mb->un.varDmp.word_cnt);
James Smarta0c87cb2009-07-19 10:01:10 -040018075 offset += mb->un.varDmp.word_cnt;
18076 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
18077
James Smart026abb82011-12-13 13:20:45 -050018078 mempool_free(pmb, phba->mbox_mem_pool);
18079 return offset;
18080}
18081
18082/**
18083 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
18084 * @phba: pointer to lpfc hba data structure.
18085 * @rgn23_data: pointer to configure region 23 data.
18086 *
18087 * This function gets SLI4 port configure region 23 data through memory dump
18088 * mailbox command. When it successfully retrieves data, the size of the data
18089 * will be returned, otherwise, 0 will be returned.
18090 **/
18091static uint32_t
18092lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18093{
18094 LPFC_MBOXQ_t *mboxq = NULL;
18095 struct lpfc_dmabuf *mp = NULL;
18096 struct lpfc_mqe *mqe;
18097 uint32_t data_length = 0;
18098 int rc;
18099
18100 if (!rgn23_data)
18101 return 0;
18102
18103 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18104 if (!mboxq) {
18105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18106 "3105 failed to allocate mailbox memory\n");
18107 return 0;
18108 }
18109
18110 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
18111 goto out;
18112 mqe = &mboxq->u.mqe;
18113 mp = (struct lpfc_dmabuf *) mboxq->context1;
18114 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18115 if (rc)
18116 goto out;
18117 data_length = mqe->un.mb_words[5];
18118 if (data_length == 0)
18119 goto out;
18120 if (data_length > DMP_RGN23_SIZE) {
18121 data_length = 0;
18122 goto out;
18123 }
18124 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
18125out:
18126 mempool_free(mboxq, phba->mbox_mem_pool);
18127 if (mp) {
18128 lpfc_mbuf_free(phba, mp->virt, mp->phys);
18129 kfree(mp);
18130 }
18131 return data_length;
18132}
18133
18134/**
18135 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
18136 * @phba: pointer to lpfc hba data structure.
18137 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
18140 * port is disabled, the hba_flag is set accordingly.
18141 **/
18142void
18143lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18144{
18145 uint8_t *rgn23_data = NULL;
18146 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18147 uint32_t offset = 0;
18148
18149 /* Get adapter Region 23 data */
18150 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18151 if (!rgn23_data)
18152 goto out;
18153
18154 if (phba->sli_rev < LPFC_SLI_REV4)
18155 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18156 else {
18157 if_type = bf_get(lpfc_sli_intf_if_type,
18158 &phba->sli4_hba.sli_intf);
18159 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18160 goto out;
18161 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18162 }
James Smarta0c87cb2009-07-19 10:01:10 -040018163
18164 if (!data_size)
18165 goto out;
18166
18167 /* Check the region signature first */
18168 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18169 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18170 "2619 Config region 23 has bad signature\n");
18171 goto out;
18172 }
18173 offset += 4;
18174
18175 /* Check the data structure version */
18176 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18177 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18178 "2620 Config region 23 has bad version\n");
18179 goto out;
18180 }
18181 offset += 4;
18182
18183 /* Parse TLV entries in the region */
18184 while (offset < data_size) {
18185 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18186 break;
18187 /*
		 * If the TLV is not a driver specific TLV or the driver id is
		 * not the Linux driver id, skip the record.
18190 */
18191 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18192 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18193 (rgn23_data[offset + 3] != 0)) {
18194 offset += rgn23_data[offset + 1] * 4 + 4;
18195 continue;
18196 }
18197
18198 /* Driver found a driver specific TLV in the config region */
18199 sub_tlv_len = rgn23_data[offset + 1] * 4;
18200 offset += 4;
18201 tlv_offset = 0;
18202
18203 /*
18204 * Search for configured port state sub-TLV.
18205 */
18206 while ((offset < data_size) &&
18207 (tlv_offset < sub_tlv_len)) {
18208 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18209 offset += 4;
18210 tlv_offset += 4;
18211 break;
18212 }
18213 if (rgn23_data[offset] != PORT_STE_TYPE) {
				/* advance both offsets by this record's length */
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
18216 continue;
18217 }
18218
18219 /* This HBA contains PORT_STE configured */
18220 if (!rgn23_data[offset + 2])
18221 phba->hba_flag |= LINK_DISABLED;
18222
18223 goto out;
18224 }
18225 }
James Smart026abb82011-12-13 13:20:45 -050018226
James Smarta0c87cb2009-07-19 10:01:10 -040018227out:
James Smarta0c87cb2009-07-19 10:01:10 -040018228 kfree(rgn23_data);
18229 return;
18230}
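
/*
 * Record layout implied by the region 23 parsing above (byte offsets are
 * relative to the start of each TLV record); this is a reader's summary,
 * not a normative format description:
 *
 *	byte 0: record type (DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE, or
 *		LPFC_REGION23_LAST_REC to terminate the walk)
 *	byte 1: record length in words; a record spans length * 4 + 4 bytes
 *	byte 2: LINUX_DRIVER_ID for the driver specific TLV; for the
 *		PORT_STE sub-TLV, the configured port state, where 0 means
 *		the user disabled the port and LINK_DISABLED is set
 *	byte 3: must be 0 for the driver specific TLV
 */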
James Smart695a8142010-01-26 23:08:03 -050018231
18232/**
James Smart52d52442011-05-24 11:42:45 -040018233 * lpfc_wr_object - write an object to the firmware
18234 * @phba: HBA structure that indicates port to create a queue on.
18235 * @dmabuf_list: list of dmabufs to write to the port.
18236 * @size: the total byte value of the objects to write to the port.
18237 * @offset: the current offset to be used to start the transfer.
18238 *
18239 * This routine will create a wr_object mailbox command to send to the port.
18240 * the mailbox command will be constructed using the dma buffers described in
18241 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18242 * BDEs that the imbedded mailbox can support. The @offset variable will be
18243 * used to indicate the starting offset of the transfer and will also return
18244 * the offset after the write object mailbox has completed. @size is used to
18245 * determine the end of the object and whether the eof bit should be set.
18246 *
 * Return 0 if successful and @offset will contain the new offset to use
18248 * for the next write.
18249 * Return negative value for error cases.
18250 **/
18251int
18252lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
18253 uint32_t size, uint32_t *offset)
18254{
18255 struct lpfc_mbx_wr_object *wr_object;
18256 LPFC_MBOXQ_t *mbox;
18257 int rc = 0, i = 0;
18258 uint32_t shdr_status, shdr_add_status;
18259 uint32_t mbox_tmo;
18260 union lpfc_sli4_cfg_shdr *shdr;
18261 struct lpfc_dmabuf *dmabuf;
18262 uint32_t written = 0;
18263
18264 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18265 if (!mbox)
18266 return -ENOMEM;
18267
18268 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
18269 LPFC_MBOX_OPCODE_WRITE_OBJECT,
18270 sizeof(struct lpfc_mbx_wr_object) -
18271 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
18272
18273 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
18274 wr_object->u.request.write_offset = *offset;
18275 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
18276 wr_object->u.request.object_name[0] =
18277 cpu_to_le32(wr_object->u.request.object_name[0]);
18278 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
18279 list_for_each_entry(dmabuf, dmabuf_list, list) {
18280 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
18281 break;
18282 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
18283 wr_object->u.request.bde[i].addrHigh =
18284 putPaddrHigh(dmabuf->phys);
18285 if (written + SLI4_PAGE_SIZE >= size) {
18286 wr_object->u.request.bde[i].tus.f.bdeSize =
18287 (size - written);
18288 written += (size - written);
18289 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
18290 } else {
18291 wr_object->u.request.bde[i].tus.f.bdeSize =
18292 SLI4_PAGE_SIZE;
18293 written += SLI4_PAGE_SIZE;
18294 }
18295 i++;
18296 }
18297 wr_object->u.request.bde_count = i;
18298 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
18299 if (!phba->sli4_hba.intr_enable)
18300 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18301 else {
James Smarta183a152011-10-10 21:32:43 -040018302 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart52d52442011-05-24 11:42:45 -040018303 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18304 }
18305 /* The IOCTL status is embedded in the mailbox subheader. */
18306 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
18307 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18308 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18309 if (rc != MBX_TIMEOUT)
18310 mempool_free(mbox, phba->mbox_mem_pool);
18311 if (shdr_status || shdr_add_status || rc) {
18312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18313 "3025 Write Object mailbox failed with "
18314 "status x%x add_status x%x, mbx status x%x\n",
18315 shdr_status, shdr_add_status, rc);
18316 rc = -ENXIO;
18317 } else
18318 *offset += wr_object->u.response.actual_write_length;
18319 return rc;
18320}
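
/*
 * Illustrative sketch (not part of the driver): driving an object download
 * with lpfc_wr_object(). Passing the remaining byte count as @size follows
 * from the EOF check above; @offset advances after each successful mailbox.
 * The fw_image_size and dma_buffer_list names are hypothetical, and refilling
 * dma_buffer_list with the next chunk of image data between calls is not
 * shown.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (!rc && offset < fw_image_size)
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw_image_size - offset, &offset);
 */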
18321
18322/**
James Smart695a8142010-01-26 23:08:03 -050018323 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
18324 * @vport: pointer to vport data structure.
18325 *
18326 * This function iterate through the mailboxq and clean up all REG_LOGIN
18327 * and REG_VPI mailbox commands associated with the vport. This function
18328 * is called when driver want to restart discovery of the vport due to
18329 * a Clear Virtual Link event.
18330 **/
18331void
18332lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
18333{
18334 struct lpfc_hba *phba = vport->phba;
18335 LPFC_MBOXQ_t *mb, *nextmb;
18336 struct lpfc_dmabuf *mp;
James Smart78730cf2010-04-06 15:06:30 -040018337 struct lpfc_nodelist *ndlp;
James Smartd439d282010-09-29 11:18:45 -040018338 struct lpfc_nodelist *act_mbx_ndlp = NULL;
James Smart589a52d2010-07-14 15:30:54 -040018339 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
James Smartd439d282010-09-29 11:18:45 -040018340 LIST_HEAD(mbox_cmd_list);
James Smart63e801c2010-11-20 23:14:19 -050018341 uint8_t restart_loop;
James Smart695a8142010-01-26 23:08:03 -050018342
James Smartd439d282010-09-29 11:18:45 -040018343 /* Clean up internally queued mailbox commands with the vport */
James Smart695a8142010-01-26 23:08:03 -050018344 spin_lock_irq(&phba->hbalock);
18345 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
18346 if (mb->vport != vport)
18347 continue;
18348
18349 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18350 (mb->u.mb.mbxCommand != MBX_REG_VPI))
18351 continue;
18352
James Smartd439d282010-09-29 11:18:45 -040018353 list_del(&mb->list);
18354 list_add_tail(&mb->list, &mbox_cmd_list);
18355 }
18356 /* Clean up active mailbox command with the vport */
18357 mb = phba->sli.mbox_active;
18358 if (mb && (mb->vport == vport)) {
18359 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
18360 (mb->u.mb.mbxCommand == MBX_REG_VPI))
18361 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18362 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18363 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
18364 /* Put reference count for delayed processing */
18365 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
18366 /* Unregister the RPI when mailbox complete */
18367 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18368 }
18369 }
James Smart63e801c2010-11-20 23:14:19 -050018370 /* Cleanup any mailbox completions which are not yet processed */
18371 do {
18372 restart_loop = 0;
18373 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
18374 /*
18375 * If this mailox is already processed or it is
18376 * for another vport ignore it.
18377 */
18378 if ((mb->vport != vport) ||
18379 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
18380 continue;
18381
18382 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18383 (mb->u.mb.mbxCommand != MBX_REG_VPI))
18384 continue;
18385
18386 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18387 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18388 ndlp = (struct lpfc_nodelist *)mb->context2;
18389 /* Unregister the RPI when mailbox complete */
18390 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18391 restart_loop = 1;
18392 spin_unlock_irq(&phba->hbalock);
18393 spin_lock(shost->host_lock);
18394 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18395 spin_unlock(shost->host_lock);
18396 spin_lock_irq(&phba->hbalock);
18397 break;
18398 }
18399 }
18400 } while (restart_loop);
18401
James Smartd439d282010-09-29 11:18:45 -040018402 spin_unlock_irq(&phba->hbalock);
18403
18404 /* Release the cleaned-up mailbox commands */
18405 while (!list_empty(&mbox_cmd_list)) {
18406 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
James Smart695a8142010-01-26 23:08:03 -050018407 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18408 mp = (struct lpfc_dmabuf *) (mb->context1);
18409 if (mp) {
18410 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
18411 kfree(mp);
18412 }
James Smart78730cf2010-04-06 15:06:30 -040018413 ndlp = (struct lpfc_nodelist *) mb->context2;
James Smartd439d282010-09-29 11:18:45 -040018414 mb->context2 = NULL;
James Smart78730cf2010-04-06 15:06:30 -040018415 if (ndlp) {
Dan Carpenterec21b3b2010-08-08 00:15:17 +020018416 spin_lock(shost->host_lock);
James Smart589a52d2010-07-14 15:30:54 -040018417 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
Dan Carpenterec21b3b2010-08-08 00:15:17 +020018418 spin_unlock(shost->host_lock);
James Smart78730cf2010-04-06 15:06:30 -040018419 lpfc_nlp_put(ndlp);
James Smart78730cf2010-04-06 15:06:30 -040018420 }
James Smart695a8142010-01-26 23:08:03 -050018421 }
James Smart695a8142010-01-26 23:08:03 -050018422 mempool_free(mb, phba->mbox_mem_pool);
18423 }
James Smartd439d282010-09-29 11:18:45 -040018424
18425 /* Release the ndlp with the cleaned-up active mailbox command */
18426 if (act_mbx_ndlp) {
18427 spin_lock(shost->host_lock);
18428 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18429 spin_unlock(shost->host_lock);
18430 lpfc_nlp_put(act_mbx_ndlp);
James Smart695a8142010-01-26 23:08:03 -050018431 }
James Smart695a8142010-01-26 23:08:03 -050018432}
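
/*
 * Illustrative sketch (not part of the driver): a Clear Virtual Link handler
 * would typically drop the vport's stale REG_LOGIN/REG_VPI mailboxes before
 * restarting discovery; the surrounding CVL handling is assumed, not shown.
 *
 *	lpfc_cleanup_pending_mbox(vport);
 */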
18433
James Smart2a9bf3d2010-06-07 15:24:45 -040018434/**
18435 * lpfc_drain_txq - Drain the txq
18436 * @phba: Pointer to HBA context object.
18437 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
18442 * vport counts during node discovery.
18443 **/
18444
18445uint32_t
18446lpfc_drain_txq(struct lpfc_hba *phba)
18447{
18448 LIST_HEAD(completions);
James Smart895427b2017-02-12 13:52:30 -080018449 struct lpfc_sli_ring *pring;
Daeseok Youn2e706372014-02-21 09:03:32 +090018450 struct lpfc_iocbq *piocbq = NULL;
James Smart2a9bf3d2010-06-07 15:24:45 -040018451 unsigned long iflags = 0;
18452 char *fail_msg = NULL;
18453 struct lpfc_sglq *sglq;
James Smart2f077842016-12-19 15:07:29 -080018454 union lpfc_wqe128 wqe128;
18455 union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
James Smarta2fc4aef2014-09-03 12:57:55 -040018456 uint32_t txq_cnt = 0;
James Smart2a9bf3d2010-06-07 15:24:45 -040018457
James Smart895427b2017-02-12 13:52:30 -080018458 pring = lpfc_phba_elsring(phba);
18459
James Smart398d81c2013-05-31 17:04:19 -040018460 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart0e9bb8d2013-03-01 16:35:12 -050018461 list_for_each_entry(piocbq, &pring->txq, list) {
18462 txq_cnt++;
18463 }
18464
18465 if (txq_cnt > pring->txq_max)
18466 pring->txq_max = txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040018467
James Smart398d81c2013-05-31 17:04:19 -040018468 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040018469
James Smart0e9bb8d2013-03-01 16:35:12 -050018470 while (!list_empty(&pring->txq)) {
James Smart398d81c2013-05-31 17:04:19 -040018471 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040018472
James Smart19ca7602010-11-20 23:11:55 -050018473 piocbq = lpfc_sli_ringtx_get(phba, pring);
James Smarta6298522012-06-12 13:54:11 -040018474 if (!piocbq) {
James Smart398d81c2013-05-31 17:04:19 -040018475 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smarta6298522012-06-12 13:54:11 -040018476 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18477 "2823 txq empty and txq_cnt is %d\n ",
James Smart0e9bb8d2013-03-01 16:35:12 -050018478 txq_cnt);
James Smarta6298522012-06-12 13:54:11 -040018479 break;
18480 }
James Smart895427b2017-02-12 13:52:30 -080018481 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
James Smart2a9bf3d2010-06-07 15:24:45 -040018482 if (!sglq) {
James Smart19ca7602010-11-20 23:11:55 -050018483 __lpfc_sli_ringtx_put(phba, pring, piocbq);
James Smart398d81c2013-05-31 17:04:19 -040018484 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040018485 break;
James Smart2a9bf3d2010-06-07 15:24:45 -040018486 }
James Smart0e9bb8d2013-03-01 16:35:12 -050018487 txq_cnt--;
James Smart2a9bf3d2010-06-07 15:24:45 -040018488
18489 /* The xri and iocb resources secured,
18490 * attempt to issue request
18491 */
James Smart6d368e52011-05-24 11:44:12 -040018492 piocbq->sli4_lxritag = sglq->sli4_lxritag;
James Smart2a9bf3d2010-06-07 15:24:45 -040018493 piocbq->sli4_xritag = sglq->sli4_xritag;
18494 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
18495 fail_msg = "to convert bpl to sgl";
James Smart2f077842016-12-19 15:07:29 -080018496 else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040018497 fail_msg = "to convert iocb to wqe";
James Smart2f077842016-12-19 15:07:29 -080018498 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040018499 fail_msg = " - Wq is full";
18500 else
18501 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
18502
18503 if (fail_msg) {
18504 /* Failed means we can't issue and need to cancel */
18505 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18506 "2822 IOCB failed %s iotag 0x%x "
18507 "xri 0x%x\n",
18508 fail_msg,
18509 piocbq->iotag, piocbq->sli4_xritag);
18510 list_add_tail(&piocbq->list, &completions);
18511 }
James Smart398d81c2013-05-31 17:04:19 -040018512 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040018513 }
18514
James Smart2a9bf3d2010-06-07 15:24:45 -040018515 /* Cancel all the IOCBs that cannot be issued */
18516 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
18517 IOERR_SLI_ABORTED);
18518
James Smart0e9bb8d2013-03-01 16:35:12 -050018519 return txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040018520}
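
/*
 * Illustrative sketch (not part of the driver): a completion path that has
 * just freed ELS SGLs could give the deferred txq IOCBs another chance to be
 * issued. The return value is roughly the txq depth still outstanding after
 * the drain attempt; the txq_cnt name below is hypothetical.
 *
 *	uint32_t txq_cnt;
 *
 *	txq_cnt = lpfc_drain_txq(phba);
 */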
James Smart895427b2017-02-12 13:52:30 -080018521
18522/**
18523 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
18524 * @phba: Pointer to HBA context object.
18525 * @pwqe: Pointer to command WQE.
18526 * @sglq: Pointer to the scatter gather queue object.
18527 *
18528 * This routine converts the bpl or bde that is in the WQE
18529 * to a sgl list for the sli4 hardware. The physical address
18530 * of the bpl/bde is converted back to a virtual address.
18531 * If the WQE contains a BPL then the list of BDE's is
18532 * converted to sli4_sge's. If the WQE contains a single
18533 * BDE then it is converted to a single sli_sge.
18534 * The WQE is still in cpu endianness so the contents of
18535 * the bpl can be used without byte swapping.
18536 *
18537 * Returns valid XRI = Success, NO_XRI = Failure.
18538 */
18539static uint16_t
18540lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
18541 struct lpfc_sglq *sglq)
18542{
18543 uint16_t xritag = NO_XRI;
18544 struct ulp_bde64 *bpl = NULL;
18545 struct ulp_bde64 bde;
18546 struct sli4_sge *sgl = NULL;
18547 struct lpfc_dmabuf *dmabuf;
18548 union lpfc_wqe *wqe;
18549 int numBdes = 0;
18550 int i = 0;
18551 uint32_t offset = 0; /* accumulated offset in the sg request list */
18552 int inbound = 0; /* number of sg reply entries inbound from firmware */
18553 uint32_t cmd;
18554
18555 if (!pwqeq || !sglq)
18556 return xritag;
18557
18558 sgl = (struct sli4_sge *)sglq->sgl;
18559 wqe = &pwqeq->wqe;
18560 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
18561
18562 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
18563 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
18564 return sglq->sli4_xritag;
18565 numBdes = pwqeq->rsvd2;
18566 if (numBdes) {
18567 /* The addrHigh and addrLow fields within the WQE
18568 * have not been byteswapped yet so there is no
18569 * need to swap them back.
18570 */
18571 if (pwqeq->context3)
18572 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
18573 else
18574 return xritag;
18575
18576 bpl = (struct ulp_bde64 *)dmabuf->virt;
18577 if (!bpl)
18578 return xritag;
18579
18580 for (i = 0; i < numBdes; i++) {
18581 /* Should already be byte swapped. */
18582 sgl->addr_hi = bpl->addrHigh;
18583 sgl->addr_lo = bpl->addrLow;
18584
18585 sgl->word2 = le32_to_cpu(sgl->word2);
18586 if ((i+1) == numBdes)
18587 bf_set(lpfc_sli4_sge_last, sgl, 1);
18588 else
18589 bf_set(lpfc_sli4_sge_last, sgl, 0);
18590 /* swap the size field back to the cpu so we
18591 * can assign it to the sgl.
18592 */
18593 bde.tus.w = le32_to_cpu(bpl->tus.w);
18594 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
18595 /* The offsets in the sgl need to be accumulated
18596 * separately for the request and reply lists.
18597 * The request is always first, the reply follows.
18598 */
18599 switch (cmd) {
18600 case CMD_GEN_REQUEST64_WQE:
18601 /* add up the reply sg entries */
18602 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
18603 inbound++;
18604 /* first inbound? reset the offset */
18605 if (inbound == 1)
18606 offset = 0;
18607 bf_set(lpfc_sli4_sge_offset, sgl, offset);
18608 bf_set(lpfc_sli4_sge_type, sgl,
18609 LPFC_SGE_TYPE_DATA);
18610 offset += bde.tus.f.bdeSize;
18611 break;
18612 case CMD_FCP_TRSP64_WQE:
18613 bf_set(lpfc_sli4_sge_offset, sgl, 0);
18614 bf_set(lpfc_sli4_sge_type, sgl,
18615 LPFC_SGE_TYPE_DATA);
18616 break;
18617 case CMD_FCP_TSEND64_WQE:
18618 case CMD_FCP_TRECEIVE64_WQE:
18619 bf_set(lpfc_sli4_sge_type, sgl,
18620 bpl->tus.f.bdeFlags);
18621 if (i < 3)
18622 offset = 0;
18623 else
18624 offset += bde.tus.f.bdeSize;
18625 bf_set(lpfc_sli4_sge_offset, sgl, offset);
18626 break;
18627 }
18628 sgl->word2 = cpu_to_le32(sgl->word2);
18629 bpl++;
18630 sgl++;
18631 }
18632 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
18633 /* The addrHigh and addrLow fields of the BDE have not
18634 * been byteswapped yet so they need to be swapped
18635 * before putting them in the sgl.
18636 */
18637 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
18638 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
18639 sgl->word2 = le32_to_cpu(sgl->word2);
18640 bf_set(lpfc_sli4_sge_last, sgl, 1);
18641 sgl->word2 = cpu_to_le32(sgl->word2);
18642 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
18643 }
18644 return sglq->sli4_xritag;
18645}
18646
18647/**
18648 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
18649 * @phba: Pointer to HBA context object.
18650 * @ring_number: Base sli ring number
18651 * @pwqe: Pointer to command WQE.
 *
 * Returns 0 on success; WQE_BUSY or WQE_ERROR otherwise.
 **/
18653int
18654lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
18655 struct lpfc_iocbq *pwqe)
18656{
18657 union lpfc_wqe *wqe = &pwqe->wqe;
James Smartf358dd02017-02-12 13:52:34 -080018658 struct lpfc_nvmet_rcv_ctx *ctxp;
James Smart895427b2017-02-12 13:52:30 -080018659 struct lpfc_queue *wq;
18660 struct lpfc_sglq *sglq;
18661 struct lpfc_sli_ring *pring;
18662 unsigned long iflags;
18663
18664 /* NVME_LS and NVME_LS ABTS requests. */
18665 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
18666 pring = phba->sli4_hba.nvmels_wq->pring;
18667 spin_lock_irqsave(&pring->ring_lock, iflags);
18668 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
18669 if (!sglq) {
18670 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18671 return WQE_BUSY;
18672 }
18673 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18674 pwqe->sli4_xritag = sglq->sli4_xritag;
18675 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
18676 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18677 return WQE_ERROR;
18678 }
18679 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18680 pwqe->sli4_xritag);
18681 if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
18682 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18683 return WQE_ERROR;
18684 }
18685 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18686 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18687 return 0;
18688 }
18689
18690 /* NVME_FCREQ and NVME_ABTS requests */
18691 if (pwqe->iocb_flag & LPFC_IO_NVME) {
18692 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
18693 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18694
18695 spin_lock_irqsave(&pring->ring_lock, iflags);
18696 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18697 bf_set(wqe_cqid, &wqe->generic.wqe_com,
18698 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18699 if (lpfc_sli4_wq_put(wq, wqe)) {
18700 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18701 return WQE_ERROR;
18702 }
18703 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18704 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18705 return 0;
18706 }
18707
James Smartf358dd02017-02-12 13:52:34 -080018708 /* NVMET requests */
18709 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
18710 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
18711 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18712
18713 spin_lock_irqsave(&pring->ring_lock, iflags);
18714 ctxp = pwqe->context2;
18715 sglq = ctxp->rqb_buffer->sglq;
18716 if (pwqe->sli4_xritag == NO_XRI) {
18717 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18718 pwqe->sli4_xritag = sglq->sli4_xritag;
18719 }
18720 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18721 pwqe->sli4_xritag);
18722 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18723 bf_set(wqe_cqid, &wqe->generic.wqe_com,
18724 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18725 if (lpfc_sli4_wq_put(wq, wqe)) {
18726 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18727 return WQE_ERROR;
18728 }
18729 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18730 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18731 return 0;
18732 }
James Smart895427b2017-02-12 13:52:30 -080018733 return WQE_ERROR;
18734}
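
/*
 * Illustrative sketch (not part of the driver): posting a prepared NVME LS
 * request through lpfc_sli4_issue_wqe(). Building the WQE, the BPL in
 * context3 and the iocb_flag is assumed to have been done already; the
 * pwqeq name and the caller's error codes are hypothetical.
 *
 *	int rc;
 *
 *	pwqeq->iocb_flag |= LPFC_IO_NVME_LS;
 *	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, pwqeq);
 *	if (rc == WQE_BUSY)
 *		return -EBUSY;
 *	if (rc)
 *		return -EIO;
 *	return 0;
 */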