blob: df77e75b9f533d5f7d04289fa0e644ae48e47360 [file] [log] [blame]
dea31012005-04-17 16:05:31 -05001/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04003 * Fibre Channel Host Bus Adapters. *
James Smart145e5a82020-01-27 16:23:12 -08004 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
James Smart3e21d1c2018-05-04 20:37:59 -07005 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
James Smart50611572016-03-31 14:12:34 -07006 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04007 * EMULEX and SLI are trademarks of Emulex. *
James Smartd080abe2017-02-12 13:52:39 -08008 * www.broadcom.com *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04009 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea31012005-04-17 16:05:31 -050010 * *
11 * This program is free software; you can redistribute it and/or *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -040012 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea31012005-04-17 16:05:31 -050022 *******************************************************************/
23
dea31012005-04-17 16:05:31 -050024#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090028#include <linux/slab.h>
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +010029#include <linux/lockdep.h>
dea31012005-04-17 16:05:31 -050030
James.Smart@Emulex.Com91886522005-08-10 15:03:09 -040031#include <scsi/scsi.h>
dea31012005-04-17 16:05:31 -050032#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
James.Smart@Emulex.Comf888ba32005-08-10 15:03:01 -040035#include <scsi/scsi_transport_fc.h>
James Smartda0436e2009-05-22 14:51:39 -040036#include <scsi/fc/fc_fs.h>
James Smart0d878412009-10-02 15:16:56 -040037#include <linux/aer.h>
James Smart1351e692018-02-22 08:18:43 -080038#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
dea31012005-04-17 16:05:31 -050041
James Smart895427b2017-02-12 13:52:30 -080042#include <linux/nvme-fc-driver.h>
43
James Smartda0436e2009-05-22 14:51:39 -040044#include "lpfc_hw4.h"
dea31012005-04-17 16:05:31 -050045#include "lpfc_hw.h"
46#include "lpfc_sli.h"
James Smartda0436e2009-05-22 14:51:39 -040047#include "lpfc_sli4.h"
James Smartea2151b2008-09-07 11:52:10 -040048#include "lpfc_nl.h"
dea31012005-04-17 16:05:31 -050049#include "lpfc_disc.h"
dea31012005-04-17 16:05:31 -050050#include "lpfc.h"
James Smart895427b2017-02-12 13:52:30 -080051#include "lpfc_scsi.h"
52#include "lpfc_nvme.h"
James Smartf358dd02017-02-12 13:52:34 -080053#include "lpfc_nvmet.h"
dea31012005-04-17 16:05:31 -050054#include "lpfc_crtn.h"
55#include "lpfc_logmsg.h"
56#include "lpfc_compat.h"
James Smart858c9f62007-06-17 19:56:39 -050057#include "lpfc_debugfs.h"
James Smart04c68492009-05-22 14:52:52 -040058#include "lpfc_vport.h"
James Smart61bda8f2016-10-13 15:06:05 -070059#include "lpfc_version.h"
dea31012005-04-17 16:05:31 -050060
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,	/* completion could not be classified */
	LPFC_UNSOL_IOCB,	/* unsolicited IOCB (not host-initiated) */
	LPFC_SOL_IOCB,		/* solicited IOCB (host-initiated request) */
	LPFC_ABORT_IOCB		/* IOCB associated with an abort */
} lpfc_iocb_type;
68
James Smart4f774512009-05-22 14:52:35 -040069
70/* Provide function prototypes local to this module. */
71static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint32_t);
73static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
James Smart45ed1192009-10-02 15:17:02 -040074 uint8_t *, uint32_t *);
75static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
76 struct lpfc_iocbq *);
James Smart6669f9b2009-10-02 15:16:45 -040077static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
78 struct hbq_dmabuf *);
James Smartae9e28f2017-05-15 15:20:51 -070079static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
80 struct hbq_dmabuf *dmabuf);
James Smart32517fc2019-01-28 11:14:33 -080081static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
82 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
James Smart895427b2017-02-12 13:52:30 -080083static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
James Smart8a9d2e82012-05-09 21:16:12 -040084 int);
Dick Kennedyf485c182017-09-29 17:34:34 -070085static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
James Smart32517fc2019-01-28 11:14:33 -080086 struct lpfc_queue *eq,
87 struct lpfc_eqe *eqe);
James Smarte8d3c3b2013-10-10 12:21:30 -040088static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
89static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
James Smart24c7c0a2019-09-21 20:58:58 -070090static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
91static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
92 struct lpfc_queue *cq,
93 struct lpfc_cqe *cqe);
James Smart05580562011-05-24 11:40:48 -040094
/**
 * lpfc_get_iocb_from_iocbq - Get the IOCB from the iocbq wrapper
 * @iocbq: The driver iocbq object holding the IOCB.
 *
 * Return: pointer to the IOCB_t embedded in @iocbq. No validation is
 * performed; the caller must pass a non-NULL iocbq.
 **/
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
100
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of BYTES to copy. The loop advances one 64-bit word per
 *       iteration, so @cnt must be a multiple of sizeof(uint64_t).
 *       (The historical comment said "number of words"; the loop below
 *       clearly compares the byte counter @i against @cnt.)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. On a 64-bit little-endian host no byte swapping is
 * needed, so the data is copied directly in 64-bit words. On all other
 * configurations the macro in the #else branch falls back to
 * lpfc_sli_pcimem_bcopy(), which performs the endian conversion.
 * This function can be called with or without lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	/* i counts bytes; each iteration moves one 64-bit word */
	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
128
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry.
 *
 * Return: 0 on success, -ENOMEM if @q is NULL, -EBUSY if the queue is full,
 * -EINVAL on an unrecognized doorbell format.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		/* queue full: host would overrun unconsumed entries */
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while (at notify_interval) */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			    q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			    q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		/* NOTE(review): host_index was already advanced here even
		 * though the doorbell is never rung — preserved as-is. */
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
224
225/**
226 * lpfc_sli4_wq_release - Updates internal hba index for WQ
227 * @q: The Work Queue to operate on.
228 * @index: The index to advance the hba index to.
229 *
230 * This routine will update the HBA index of a queue to reflect consumption of
231 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
232 * an entry the host calls this function to update the queue's internal
James Smart1543af32020-03-22 11:12:58 -0700233 * pointers.
James Smart4f774512009-05-22 14:52:35 -0400234 **/
James Smart1543af32020-03-22 11:12:58 -0700235static void
James Smart4f774512009-05-22 14:52:35 -0400236lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
237{
James Smart2e90f4b2011-12-13 13:22:37 -0500238 /* sanity check on queue memory */
239 if (unlikely(!q))
James Smart1543af32020-03-22 11:12:58 -0700240 return;
James Smart2e90f4b2011-12-13 13:22:37 -0500241
James Smart1543af32020-03-22 11:12:58 -0700242 q->hba_index = index;
James Smart4f774512009-05-22 14:52:35 -0400243}
244
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * NOTE(review): the return type is uint32_t yet -ENOMEM is returned; callers
 * presumably compare against MBX error codes — confirm before changing.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell to tell the HBA one MQE was posted */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
285
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers.
 *
 * Return: number of entries consumed — always 1, or 0 when @q is NULL.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	/* advance the internal hba index, wrapping at entry_count */
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
308
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine returns the first valid Event Queue Entry from @q, or NULL
 * if no valid EQE is pending. Note that it does NOT advance any queue
 * index — consumption is done separately (see __lpfc_sli4_consume_eqe()),
 * so repeated calls without consuming return the same EQE.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
344
345/**
James Smartba20c852012-08-03 12:36:52 -0400346 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
347 * @q: The Event Queue to disable interrupts
348 *
349 **/
James Smart92f3b322019-03-20 10:44:22 -0700350void
James Smartba20c852012-08-03 12:36:52 -0400351lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
352{
353 struct lpfc_register doorbell;
354
355 doorbell.word0 = 0;
356 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
357 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
358 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
359 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
360 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
James Smart9dd35422018-02-22 08:18:41 -0800361 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
James Smartba20c852012-08-03 12:36:52 -0400362}
363
364/**
James Smart27d6ac02018-02-22 08:18:42 -0800365 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
366 * @q: The Event Queue to disable interrupts
367 *
368 **/
James Smart92f3b322019-03-20 10:44:22 -0700369void
James Smart27d6ac02018-02-22 08:18:42 -0800370lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
371{
372 struct lpfc_register doorbell;
373
374 doorbell.word0 = 0;
James Smartaad59d52018-09-10 10:30:47 -0700375 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
James Smart27d6ac02018-02-22 08:18:42 -0800376 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
377}
378
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ (unused here; kept for sli4_write_eq_db fn-ptr
 *        signature parity with the if_type 6 variant)
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory; nothing to report if no work and
	 * no rearm requested
	 */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	/* (arm is bool, so arm == LPFC_QUEUE_REARM is just "arm is true") */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
416
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ (unused here; kept for fn-ptr signature parity)
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * if_type 6 variant. This routine will notify the HBA, by ringing the
 * doorbell, that count number of EQEs have been processed. The @arm
 * parameter indicates whether the queue should be rearmed when ringing
 * the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
449
450static void
451__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
452 struct lpfc_eqe *eqe)
453{
454 if (!phba->sli4_hba.pc_sli4_params.eqav)
455 bf_set_le32(lpfc_eqe_valid, eqe, 0);
456
457 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
458
459 /* if the index wrapped around, toggle the valid bit */
460 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
461 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
462}
463
/**
 * lpfc_sli4_eqcq_flush - drain all pending entries from an EQ and its CQs
 * @phba: adapter owning the queues
 * @eq: the Event Queue to flush
 *
 * Walks every valid EQE on @eq without processing it. For each EQE, the
 * child CQ it refers to (matched by queue id in the EQ's child_list) is
 * likewise drained of all valid CQEs. Each drained CQ, and finally the EQ
 * itself, is cleared and re-armed via its doorbell with the count of
 * entries dropped.
 **/
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		/* find the child CQ whose id matches this EQE */
		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
507
/**
 * lpfc_sli4_process_eq - process all pending entries on an Event Queue
 * @phba: adapter owning the EQ
 * @eq: the Event Queue to process
 * @rearm: doorbell arm state to apply when done (LPFC_QUEUE_REARM/NOARM)
 *
 * Claims @eq via an atomic cmpxchg on queue_claimed so only one context
 * processes it at a time; if already claimed, skips straight to the
 * doorbell write. Each valid EQE is handed to lpfc_sli4_hba_handle_eqe()
 * then consumed. Processing stops after max_proc_limit entries, and the
 * doorbell is written (NOARM) every notify_interval entries to release
 * consumed slots back to the HBA.
 *
 * Return: number of EQEs processed (0 if the queue was already claimed).
 **/
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	/* another context already owns this EQ; just ring the doorbell */
	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		/* bound the work done in a single invocation */
		if (!(++count % eq->max_proc_limit))
			break;

		/* periodically release consumed entries to the HBA */
		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	/* release the claim before the final doorbell write */
	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
549
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine returns the first valid Completion Queue Entry from @q, or
 * NULL if no valid CQE is pending. Note that it does NOT advance any queue
 * index — consumption is done separately (see __lpfc_sli4_consume_cqe()),
 * so repeated calls without consuming return the same CQE.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
584
James Smart32517fc2019-01-28 11:14:33 -0800585static void
586__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
587 struct lpfc_cqe *cqe)
588{
589 if (!phba->sli4_hba.pc_sli4_params.cqav)
590 bf_set_le32(lpfc_cqe_valid, cqe, 0);
591
592 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
593
594 /* if the index wrapped around, toggle the valid bit */
595 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
596 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
597}
598
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ (unused here; kept for fn-ptr signature
 *        parity with the if_type 6 variant)
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory; nothing to do if no work and
	 * no rearm requested
	 */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
631
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ (unused here; kept for fn-ptr signature
 *        parity)
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * if_type 6 variant. This routine will notify the HBA, by ringing the
 * doorbell, that the CQEs have been processed. The @arm parameter
 * specifies whether the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
661
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the
 * Receive Queue Doorbell to signal the HBA to start processing the
 * Receive Queue Entries. This function returns the index that the rqe
 * was copied to if successful. If no entries are available on @hq then
 * this function will return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	/* header and data queues must advance in lock step */
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell; batched - only every
	 * notify_interval postings
	 */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
727
728/**
729 * lpfc_sli4_rq_release - Updates internal hba index for RQ
730 * @q: The Header Receive Queue to operate on.
731 *
732 * This routine will update the HBA index of a queue to reflect consumption of
733 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
734 * consumed an entry the host calls this function to update the queue's
735 * internal pointers. This routine returns the number of entries that were
736 * consumed by the HBA.
737 **/
738static uint32_t
739lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
740{
James Smart2e90f4b2011-12-13 13:22:37 -0500741 /* sanity check on queue memory */
742 if (unlikely(!hq) || unlikely(!dq))
743 return 0;
744
James Smart4f774512009-05-22 14:52:35 -0400745 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
746 return 0;
747 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
748 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
749 return 1;
750}
751
James Smarte59058c2008-08-24 21:49:00 -0400752/**
James Smart3621a712009-04-06 18:47:14 -0400753 * lpfc_cmd_iocb - Get next command iocb entry in the ring
James Smarte59058c2008-08-24 21:49:00 -0400754 * @phba: Pointer to HBA context object.
755 * @pring: Pointer to driver SLI ring object.
756 *
757 * This function returns pointer to next command iocb entry
758 * in the command ring. The caller must hold hbalock to prevent
759 * other threads consume the next command iocb.
760 * SLI-2/SLI-3 provide different sized iocbs.
761 **/
James Smarted957682007-06-17 19:56:37 -0500762static inline IOCB_t *
763lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
764{
James Smart7e56aa22012-08-03 12:35:34 -0400765 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
766 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
James Smarted957682007-06-17 19:56:37 -0500767}
768
James Smarte59058c2008-08-24 21:49:00 -0400769/**
James Smart3621a712009-04-06 18:47:14 -0400770 * lpfc_resp_iocb - Get next response iocb entry in the ring
James Smarte59058c2008-08-24 21:49:00 -0400771 * @phba: Pointer to HBA context object.
772 * @pring: Pointer to driver SLI ring object.
773 *
774 * This function returns pointer to next response iocb entry
775 * in the response ring. The caller must hold hbalock to make sure
776 * that no other thread consume the next response iocb.
777 * SLI-2/SLI-3 provide different sized iocbs.
778 **/
James Smarted957682007-06-17 19:56:37 -0500779static inline IOCB_t *
780lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
781{
James Smart7e56aa22012-08-03 12:35:34 -0400782 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
783 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
James Smarted957682007-06-17 19:56:37 -0500784}
785
James Smarte59058c2008-08-24 21:49:00 -0400786/**
James Smart3621a712009-04-06 18:47:14 -0400787 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
James Smarte59058c2008-08-24 21:49:00 -0400788 * @phba: Pointer to HBA context object.
789 *
790 * This function is called with hbalock held. This function
791 * allocates a new driver iocb object from the iocb pool. If the
792 * allocation is successful, it returns pointer to the newly
793 * allocated iocb object else it returns NULL.
794 **/
James Smart4f2e66c2012-05-09 21:17:07 -0400795struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -0500796__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400797{
798 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
799 struct lpfc_iocbq * iocbq = NULL;
800
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +0100801 lockdep_assert_held(&phba->hbalock);
802
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400803 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
James Smart2a9bf3d2010-06-07 15:24:45 -0400804 if (iocbq)
805 phba->iocb_cnt++;
806 if (phba->iocb_cnt > phba->iocb_max)
807 phba->iocb_max = phba->iocb_cnt;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400808 return iocbq;
809}
810
James Smarte59058c2008-08-24 21:49:00 -0400811/**
James Smartda0436e2009-05-22 14:51:39 -0400812 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
813 * @phba: Pointer to HBA context object.
814 * @xritag: XRI value.
815 *
816 * This function clears the sglq pointer from the array of acive
817 * sglq's. The xritag that is passed in is used to index into the
818 * array. Before the xritag can be used it needs to be adjusted
819 * by subtracting the xribase.
820 *
821 * Returns sglq ponter = success, NULL = Failure.
822 **/
James Smart895427b2017-02-12 13:52:30 -0800823struct lpfc_sglq *
James Smartda0436e2009-05-22 14:51:39 -0400824__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
825{
James Smartda0436e2009-05-22 14:51:39 -0400826 struct lpfc_sglq *sglq;
James Smart6d368e52011-05-24 11:44:12 -0400827
828 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
829 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
James Smartda0436e2009-05-22 14:51:39 -0400830 return sglq;
831}
832
833/**
834 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
835 * @phba: Pointer to HBA context object.
836 * @xritag: XRI value.
837 *
838 * This function returns the sglq pointer from the array of acive
839 * sglq's. The xritag that is passed in is used to index into the
840 * array. Before the xritag can be used it needs to be adjusted
841 * by subtracting the xribase.
842 *
843 * Returns sglq ponter = success, NULL = Failure.
844 **/
James Smart0f65ff62010-02-26 14:14:23 -0500845struct lpfc_sglq *
James Smartda0436e2009-05-22 14:51:39 -0400846__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
847{
James Smartda0436e2009-05-22 14:51:39 -0400848 struct lpfc_sglq *sglq;
James Smart6d368e52011-05-24 11:44:12 -0400849
850 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
James Smartda0436e2009-05-22 14:51:39 -0400851 return sglq;
852}
853
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * Clears the active bit for @xritag in the node's xri bitmap, resets
 * the rrq bookkeeping fields, and returns @rrq to the HBA's rrq
 * mempool. The rrq is always freed, even when no node can be located.
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* prefer a fresh lookup by DID when the cached node is still active */
	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	/* only reset the rrq fields if the bit was actually set */
	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
889
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function acquires and releases the hbalock internally. It
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	/* default next wakeup: one full RATOV (+1s) from now */
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	/* move expired rrqs to a private list; track the earliest
	 * remaining expiry for the timer
	 */
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	/* process the expired rrqs outside the lock */
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* lpfc_send_rrq() failed, so clear the rrq here
			 * ourselves; on success the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
941
942/**
943 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
944 * @vport: Pointer to vport context object.
945 * @xri: The xri used in the exchange.
946 * @did: The targets DID for this exchange.
947 *
948 * returns NULL = rrq not found in the phba->active_rrq_list.
949 * rrq = rrq for this xri and target.
950 **/
951struct lpfc_node_rrq *
952lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
953{
954 struct lpfc_hba *phba = vport->phba;
955 struct lpfc_node_rrq *rrq;
956 struct lpfc_node_rrq *nextrrq;
957 unsigned long iflags;
958
959 if (phba->sli_rev != LPFC_SLI_REV4)
960 return NULL;
961 spin_lock_irqsave(&phba->hbalock, iflags);
962 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
963 if (rrq->vport == vport && rrq->xritag == xri &&
964 rrq->nlp_DID == did){
965 list_del(&rrq->list);
966 spin_unlock_irqrestore(&phba->hbalock, iflags);
967 return rrq;
968 }
969 }
970 spin_unlock_irqrestore(&phba->hbalock, iflags);
971 return NULL;
972}
973
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL Remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	/* RRQs only exist on SLI4 adapters */
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		/* whole-vport cleanup: also flush queued aborted-xri events */
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	/* collect matching rrqs under the lock, then clear them outside it
	 * (lpfc_clr_rrq_active may sleep in mempool_free paths)
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
1009
1010/**
James Smart1151e3e2011-02-16 12:39:35 -05001011 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
James Smart19ca7602010-11-20 23:11:55 -05001012 * @phba: Pointer to HBA context object.
1013 * @ndlp: Targets nodelist pointer for this exchange.
1014 * @xritag the xri in the bitmap to test.
1015 *
James Smarte2a8be52019-05-06 17:26:47 -07001016 * This function returns:
1017 * 0 = rrq not active for this xri
1018 * 1 = rrq is valid for this xri.
James Smart19ca7602010-11-20 23:11:55 -05001019 **/
James Smart1151e3e2011-02-16 12:39:35 -05001020int
1021lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
James Smart19ca7602010-11-20 23:11:55 -05001022 uint16_t xritag)
1023{
James Smart19ca7602010-11-20 23:11:55 -05001024 if (!ndlp)
1025 return 0;
James Smartcff261f2013-12-17 20:29:47 -05001026 if (!ndlp->active_rrqs_xri_bitmap)
1027 return 0;
1028 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
Colin Ian King258f84f2019-02-12 15:29:45 +00001029 return 1;
James Smart19ca7602010-11-20 23:11:55 -05001030 else
1031 return 0;
1032}
1033
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		/* driver is tearing down; do not start new RRQ tracking */
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	/* already active for this xri: nothing more to do */
	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	/* drop the lock around the potentially-sleeping allocation */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	/* first rrq queued: wake the worker so it arms the expiry timer */
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
1120
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	/* determine which ring lock must be held for this iocb type */
	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	/* locate the node this iocb targets; its RRQ bitmap decides
	 * which XRIs are temporarily off limits
	 */
	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	/* rotate through the free list until an sglq is found whose XRI
	 * has no RRQ outstanding against this node; give up if we come
	 * back around to the first candidate
	 */
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				/* every free XRI is blocked by an rrq */
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		/* claim this sglq: register it in the active array */
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
1196
1197/**
James Smartf358dd02017-02-12 13:52:34 -08001198 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1199 * @phba: Pointer to HBA context object.
1200 * @piocb: Pointer to the iocbq.
1201 *
1202 * This function is called with the sgl_list lock held. This function
1203 * gets a new driver sglq object from the sglq list. If the
1204 * list is not empty then it is successful, it returns pointer to the newly
1205 * allocated sglq object else it returns NULL.
1206 **/
1207struct lpfc_sglq *
1208__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1209{
1210 struct list_head *lpfc_nvmet_sgl_list;
1211 struct lpfc_sglq *sglq = NULL;
1212
1213 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1214
1215 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1216
1217 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1218 if (!sglq)
1219 return NULL;
1220 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1221 sglq->state = SGL_ALLOCATED;
dea31012005-04-17 16:05:31 -05001222 return sglq;
1223}
1224
James Smarte59058c2008-08-24 21:49:00 -04001225/**
James Smart3621a712009-04-06 18:47:14 -04001226 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001227 * @phba: Pointer to HBA context object.
1228 *
1229 * This function is called with no lock held. This function
1230 * allocates a new driver iocb object from the iocb pool. If the
1231 * allocation is successful, it returns pointer to the newly
1232 * allocated iocb object else it returns NULL.
1233 **/
James Smart2e0fef82007-06-17 19:56:36 -05001234struct lpfc_iocbq *
1235lpfc_sli_get_iocbq(struct lpfc_hba *phba)
James Bottomley604a3e32005-10-29 10:28:33 -05001236{
James Smart2e0fef82007-06-17 19:56:36 -05001237 struct lpfc_iocbq * iocbq = NULL;
1238 unsigned long iflags;
1239
1240 spin_lock_irqsave(&phba->hbalock, iflags);
1241 iocbq = __lpfc_sli_get_iocbq(phba);
1242 spin_unlock_irqrestore(&phba->hbalock, iflags);
1243 return iocbq;
1244}
1245
James Smarte59058c2008-08-24 21:49:00 -04001246/**
James Smart4f774512009-05-22 14:52:35 -04001247 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1248 * @phba: Pointer to HBA context object.
1249 * @iocbq: Pointer to driver iocb object.
1250 *
Dick Kennedy88acb4d2020-05-01 14:43:07 -07001251 * This function is called to release the driver iocb object
1252 * to the iocb pool. The iotag in the iocb object
James Smart4f774512009-05-22 14:52:35 -04001253 * does not change for each use of the iocb object. This function
1254 * clears all other fields of the iocb object when it is freed.
1255 * The sqlq structure that holds the xritag and phys and virtual
1256 * mappings for the scatter gather list is retrieved from the
1257 * active array of sglq. The get of the sglq pointer also clears
1258 * the entry in the array. If the status of the IO indiactes that
1259 * this IO was aborted then the sglq entry it put on the
1260 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1261 * IO has good status or fails for any other reason then the sglq
Dick Kennedy88acb4d2020-05-01 14:43:07 -07001262 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1263 * asserted held in the code path calling this routine.
James Smart4f774512009-05-22 14:52:35 -04001264 **/
1265static void
1266__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1267{
1268 struct lpfc_sglq *sglq;
1269 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
James Smart2a9bf3d2010-06-07 15:24:45 -04001270 unsigned long iflag = 0;
James Smart895427b2017-02-12 13:52:30 -08001271 struct lpfc_sli_ring *pring;
James Smart4f774512009-05-22 14:52:35 -04001272
1273 if (iocbq->sli4_xritag == NO_XRI)
1274 sglq = NULL;
1275 else
James Smart6d368e52011-05-24 11:44:12 -04001276 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1277
James Smart0e9bb8d2013-03-01 16:35:12 -05001278
James Smart4f774512009-05-22 14:52:35 -04001279 if (sglq) {
James Smartf358dd02017-02-12 13:52:34 -08001280 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1281 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1282 iflag);
James Smart0f65ff62010-02-26 14:14:23 -05001283 sglq->state = SGL_FREED;
James Smart19ca7602010-11-20 23:11:55 -05001284 sglq->ndlp = NULL;
James Smartfedd3b72011-02-16 12:39:24 -05001285 list_add_tail(&sglq->list,
James Smartf358dd02017-02-12 13:52:34 -08001286 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1287 spin_unlock_irqrestore(
1288 &phba->sli4_hba.sgl_list_lock, iflag);
1289 goto out;
1290 }
1291
James Smart895427b2017-02-12 13:52:30 -08001292 pring = phba->sli4_hba.els_wq->pring;
James Smart4f774512009-05-22 14:52:35 -04001293 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1294 (sglq->state != SGL_XRI_ABORTED)) {
James Smart895427b2017-02-12 13:52:30 -08001295 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1296 iflag);
James Smart341af102010-01-26 23:07:37 -05001297 list_add(&sglq->list,
James Smart895427b2017-02-12 13:52:30 -08001298 &phba->sli4_hba.lpfc_abts_els_sgl_list);
James Smart4f774512009-05-22 14:52:35 -04001299 spin_unlock_irqrestore(
James Smart895427b2017-02-12 13:52:30 -08001300 &phba->sli4_hba.sgl_list_lock, iflag);
James Smart4f774512009-05-22 14:52:35 -04001301 } else {
James Smart895427b2017-02-12 13:52:30 -08001302 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1303 iflag);
James Smart4f774512009-05-22 14:52:35 -04001304 sglq->state = SGL_FREED;
1305 sglq->ndlp = NULL;
James Smartfedd3b72011-02-16 12:39:24 -05001306 list_add_tail(&sglq->list,
James Smart895427b2017-02-12 13:52:30 -08001307 &phba->sli4_hba.lpfc_els_sgl_list);
1308 spin_unlock_irqrestore(
1309 &phba->sli4_hba.sgl_list_lock, iflag);
James Smart2a9bf3d2010-06-07 15:24:45 -04001310
1311 /* Check if TXQ queue needs to be serviced */
James Smart0e9bb8d2013-03-01 16:35:12 -05001312 if (!list_empty(&pring->txq))
James Smart2a9bf3d2010-06-07 15:24:45 -04001313 lpfc_worker_wake_up(phba);
James Smart0f65ff62010-02-26 14:14:23 -05001314 }
James Smart4f774512009-05-22 14:52:35 -04001315 }
1316
James Smartf358dd02017-02-12 13:52:34 -08001317out:
James Smart4f774512009-05-22 14:52:35 -04001318 /*
1319 * Clean all volatile data fields, preserve iotag and node struct.
1320 */
1321 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
James Smart6d368e52011-05-24 11:44:12 -04001322 iocbq->sli4_lxritag = NO_XRI;
James Smart4f774512009-05-22 14:52:35 -04001323 iocbq->sli4_xritag = NO_XRI;
James Smartf358dd02017-02-12 13:52:34 -08001324 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1325 LPFC_IO_NVME_LS);
James Smart4f774512009-05-22 14:52:35 -04001326 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1327}
1328
James Smart2a9bf3d2010-06-07 15:24:45 -04001329
James Smart4f774512009-05-22 14:52:35 -04001330/**
James Smart3772a992009-05-22 14:50:54 -04001331 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1332 * @phba: Pointer to HBA context object.
1333 * @iocbq: Pointer to driver iocb object.
1334 *
Dick Kennedy88acb4d2020-05-01 14:43:07 -07001335 * This function is called to release the driver iocb object to the
1336 * iocb pool. The iotag in the iocb object does not change for each
1337 * use of the iocb object. This function clears all other fields of
1338 * the iocb object when it is freed. The hbalock is asserted held in
1339 * the code path calling this routine.
James Smart3772a992009-05-22 14:50:54 -04001340 **/
1341static void
1342__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1343{
1344 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1345
James Smart0e9bb8d2013-03-01 16:35:12 -05001346 /*
James Smart3772a992009-05-22 14:50:54 -04001347 * Clean all volatile data fields, preserve iotag and node struct.
1348 */
1349 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1350 iocbq->sli4_xritag = NO_XRI;
1351 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1352}
1353
1354/**
James Smart3621a712009-04-06 18:47:14 -04001355 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001356 * @phba: Pointer to HBA context object.
1357 * @iocbq: Pointer to driver iocb object.
1358 *
1359 * This function is called with hbalock held to release driver
1360 * iocb object to the iocb pool. The iotag in the iocb object
1361 * does not change for each use of the iocb object. This function
1362 * clears all other fields of the iocb object when it is freed.
1363 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001364static void
James Smart2e0fef82007-06-17 19:56:36 -05001365__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1366{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001367 lockdep_assert_held(&phba->hbalock);
1368
James Smart3772a992009-05-22 14:50:54 -04001369 phba->__lpfc_sli_release_iocbq(phba, iocbq);
James Smart2a9bf3d2010-06-07 15:24:45 -04001370 phba->iocb_cnt--;
James Bottomley604a3e32005-10-29 10:28:33 -05001371}
1372
James Smarte59058c2008-08-24 21:49:00 -04001373/**
James Smart3621a712009-04-06 18:47:14 -04001374 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001375 * @phba: Pointer to HBA context object.
1376 * @iocbq: Pointer to driver iocb object.
1377 *
1378 * This function is called with no lock held to release the iocb to
1379 * iocb pool.
1380 **/
James Smart2e0fef82007-06-17 19:56:36 -05001381void
1382lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1383{
1384 unsigned long iflags;
1385
1386 /*
1387 * Clean all volatile data fields, preserve iotag and node struct.
1388 */
1389 spin_lock_irqsave(&phba->hbalock, iflags);
1390 __lpfc_sli_release_iocbq(phba, iocbq);
1391 spin_unlock_irqrestore(&phba->hbalock, iflags);
1392}
1393
James Smarte59058c2008-08-24 21:49:00 -04001394/**
James Smarta257bf92009-04-06 18:48:10 -04001395 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1396 * @phba: Pointer to HBA context object.
1397 * @iocblist: List of IOCBs.
1398 * @ulpstatus: ULP status in IOCB command field.
1399 * @ulpWord4: ULP word-4 in IOCB command field.
1400 *
1401 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1402 * on the list by invoking the complete callback function associated with the
1403 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1404 * fields.
1405 **/
1406void
1407lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1408 uint32_t ulpstatus, uint32_t ulpWord4)
1409{
1410 struct lpfc_iocbq *piocb;
1411
1412 while (!list_empty(iocblist)) {
1413 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
James Smart84f2ddf2019-08-14 16:56:55 -07001414 if (!piocb->iocb_cmpl) {
1415 if (piocb->iocb_flag & LPFC_IO_NVME)
1416 lpfc_nvme_cancel_iocb(phba, piocb);
1417 else
1418 lpfc_sli_release_iocbq(phba, piocb);
1419 } else {
James Smarta257bf92009-04-06 18:48:10 -04001420 piocb->iocb.ulpStatus = ulpstatus;
1421 piocb->iocb.un.ulpWord[4] = ulpWord4;
1422 (piocb->iocb_cmpl) (phba, piocb, piocb);
1423 }
1424 }
1425 return;
1426}
1427
1428/**
James Smart3621a712009-04-06 18:47:14 -04001429 * lpfc_sli_iocb_cmd_type - Get the iocb type
1430 * @iocb_cmnd: iocb command code.
James Smarte59058c2008-08-24 21:49:00 -04001431 *
1432 * This function is called by ring event handler function to get the iocb type.
1433 * This function translates the iocb command to an iocb command type used to
1434 * decide the final disposition of each completed IOCB.
1435 * The function returns
1436 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1437 * LPFC_SOL_IOCB if it is a solicited iocb completion
1438 * LPFC_ABORT_IOCB if it is an abort iocb
1439 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1440 *
1441 * The caller is not required to hold any lock.
1442 **/
dea31012005-04-17 16:05:31 -05001443static lpfc_iocb_type
1444lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1445{
1446 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1447
1448 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1449 return 0;
1450
1451 switch (iocb_cmnd) {
1452 case CMD_XMIT_SEQUENCE_CR:
1453 case CMD_XMIT_SEQUENCE_CX:
1454 case CMD_XMIT_BCAST_CN:
1455 case CMD_XMIT_BCAST_CX:
1456 case CMD_ELS_REQUEST_CR:
1457 case CMD_ELS_REQUEST_CX:
1458 case CMD_CREATE_XRI_CR:
1459 case CMD_CREATE_XRI_CX:
1460 case CMD_GET_RPI_CN:
1461 case CMD_XMIT_ELS_RSP_CX:
1462 case CMD_GET_RPI_CR:
1463 case CMD_FCP_IWRITE_CR:
1464 case CMD_FCP_IWRITE_CX:
1465 case CMD_FCP_IREAD_CR:
1466 case CMD_FCP_IREAD_CX:
1467 case CMD_FCP_ICMND_CR:
1468 case CMD_FCP_ICMND_CX:
James Smartf5603512006-12-02 13:35:43 -05001469 case CMD_FCP_TSEND_CX:
1470 case CMD_FCP_TRSP_CX:
1471 case CMD_FCP_TRECEIVE_CX:
1472 case CMD_FCP_AUTO_TRSP_CX:
dea31012005-04-17 16:05:31 -05001473 case CMD_ADAPTER_MSG:
1474 case CMD_ADAPTER_DUMP:
1475 case CMD_XMIT_SEQUENCE64_CR:
1476 case CMD_XMIT_SEQUENCE64_CX:
1477 case CMD_XMIT_BCAST64_CN:
1478 case CMD_XMIT_BCAST64_CX:
1479 case CMD_ELS_REQUEST64_CR:
1480 case CMD_ELS_REQUEST64_CX:
1481 case CMD_FCP_IWRITE64_CR:
1482 case CMD_FCP_IWRITE64_CX:
1483 case CMD_FCP_IREAD64_CR:
1484 case CMD_FCP_IREAD64_CX:
1485 case CMD_FCP_ICMND64_CR:
1486 case CMD_FCP_ICMND64_CX:
James Smartf5603512006-12-02 13:35:43 -05001487 case CMD_FCP_TSEND64_CX:
1488 case CMD_FCP_TRSP64_CX:
1489 case CMD_FCP_TRECEIVE64_CX:
dea31012005-04-17 16:05:31 -05001490 case CMD_GEN_REQUEST64_CR:
1491 case CMD_GEN_REQUEST64_CX:
1492 case CMD_XMIT_ELS_RSP64_CX:
James Smartda0436e2009-05-22 14:51:39 -04001493 case DSSCMD_IWRITE64_CR:
1494 case DSSCMD_IWRITE64_CX:
1495 case DSSCMD_IREAD64_CR:
1496 case DSSCMD_IREAD64_CX:
dea31012005-04-17 16:05:31 -05001497 type = LPFC_SOL_IOCB;
1498 break;
1499 case CMD_ABORT_XRI_CN:
1500 case CMD_ABORT_XRI_CX:
1501 case CMD_CLOSE_XRI_CN:
1502 case CMD_CLOSE_XRI_CX:
1503 case CMD_XRI_ABORTED_CX:
1504 case CMD_ABORT_MXRI64_CN:
James Smart6669f9b2009-10-02 15:16:45 -04001505 case CMD_XMIT_BLS_RSP64_CX:
dea31012005-04-17 16:05:31 -05001506 type = LPFC_ABORT_IOCB;
1507 break;
1508 case CMD_RCV_SEQUENCE_CX:
1509 case CMD_RCV_ELS_REQ_CX:
1510 case CMD_RCV_SEQUENCE64_CX:
1511 case CMD_RCV_ELS_REQ64_CX:
James Smart57127f12007-10-27 13:37:05 -04001512 case CMD_ASYNC_STATUS:
James Smarted957682007-06-17 19:56:37 -05001513 case CMD_IOCB_RCV_SEQ64_CX:
1514 case CMD_IOCB_RCV_ELS64_CX:
1515 case CMD_IOCB_RCV_CONT64_CX:
James Smart3163f722008-02-08 18:50:25 -05001516 case CMD_IOCB_RET_XRI64_CX:
dea31012005-04-17 16:05:31 -05001517 type = LPFC_UNSOL_IOCB;
1518 break;
James Smart3163f722008-02-08 18:50:25 -05001519 case CMD_IOCB_XMIT_MSEQ64_CR:
1520 case CMD_IOCB_XMIT_MSEQ64_CX:
1521 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1522 case CMD_IOCB_RCV_ELS_LIST64_CX:
1523 case CMD_IOCB_CLOSE_EXTENDED_CN:
1524 case CMD_IOCB_ABORT_EXTENDED_CN:
1525 case CMD_IOCB_RET_HBQE64_CN:
1526 case CMD_IOCB_FCP_IBIDIR64_CR:
1527 case CMD_IOCB_FCP_IBIDIR64_CX:
1528 case CMD_IOCB_FCP_ITASKMGT64_CX:
1529 case CMD_IOCB_LOGENTRY_CN:
1530 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1531 printk("%s - Unhandled SLI-3 Command x%x\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07001532 __func__, iocb_cmnd);
James Smart3163f722008-02-08 18:50:25 -05001533 type = LPFC_UNKNOWN_IOCB;
1534 break;
dea31012005-04-17 16:05:31 -05001535 default:
1536 type = LPFC_UNKNOWN_IOCB;
1537 break;
1538 }
1539
1540 return type;
1541}
1542
James Smarte59058c2008-08-24 21:49:00 -04001543/**
James Smart3621a712009-04-06 18:47:14 -04001544 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
James Smarte59058c2008-08-24 21:49:00 -04001545 * @phba: Pointer to HBA context object.
1546 *
1547 * This function is called from SLI initialization code
1548 * to configure every ring of the HBA's SLI interface. The
1549 * caller is not required to hold any lock. This function issues
1550 * a config_ring mailbox command for each ring.
1551 * This function returns zero if successful else returns a negative
1552 * error code.
1553 **/
dea31012005-04-17 16:05:31 -05001554static int
James Smarted957682007-06-17 19:56:37 -05001555lpfc_sli_ring_map(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05001556{
1557 struct lpfc_sli *psli = &phba->sli;
James Smarted957682007-06-17 19:56:37 -05001558 LPFC_MBOXQ_t *pmb;
1559 MAILBOX_t *pmbox;
1560 int i, rc, ret = 0;
dea31012005-04-17 16:05:31 -05001561
James Smarted957682007-06-17 19:56:37 -05001562 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1563 if (!pmb)
1564 return -ENOMEM;
James Smart04c68492009-05-22 14:52:52 -04001565 pmbox = &pmb->u.mb;
James Smarted957682007-06-17 19:56:37 -05001566 phba->link_state = LPFC_INIT_MBX_CMDS;
dea31012005-04-17 16:05:31 -05001567 for (i = 0; i < psli->num_rings; i++) {
dea31012005-04-17 16:05:31 -05001568 lpfc_config_ring(phba, i, pmb);
1569 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1570 if (rc != MBX_SUCCESS) {
James Smart92d7f7b2007-06-17 19:56:38 -05001571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04001572 "0446 Adapter failed to init (%d), "
dea31012005-04-17 16:05:31 -05001573 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1574 "ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001575 rc, pmbox->mbxCommand,
1576 pmbox->mbxStatus, i);
James Smart2e0fef82007-06-17 19:56:36 -05001577 phba->link_state = LPFC_HBA_ERROR;
James Smarted957682007-06-17 19:56:37 -05001578 ret = -ENXIO;
1579 break;
dea31012005-04-17 16:05:31 -05001580 }
1581 }
James Smarted957682007-06-17 19:56:37 -05001582 mempool_free(pmb, phba->mbox_mem_pool);
1583 return ret;
dea31012005-04-17 16:05:31 -05001584}
1585
James Smarte59058c2008-08-24 21:49:00 -04001586/**
James Smart3621a712009-04-06 18:47:14 -04001587 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
James Smarte59058c2008-08-24 21:49:00 -04001588 * @phba: Pointer to HBA context object.
1589 * @pring: Pointer to driver SLI ring object.
1590 * @piocb: Pointer to the driver iocb object.
1591 *
James Smarte2a8be52019-05-06 17:26:47 -07001592 * The driver calls this function with the hbalock held for SLI3 ports or
1593 * the ring lock held for SLI4 ports. The function adds the
James Smarte59058c2008-08-24 21:49:00 -04001594 * new iocb to txcmplq of the given ring. This function always returns
1595 * 0. If this function is called for ELS ring, this function checks if
1596 * there is a vport associated with the ELS command. This function also
1597 * starts els_tmofunc timer if this is an ELS command.
1598 **/
dea31012005-04-17 16:05:31 -05001599static int
James Smart2e0fef82007-06-17 19:56:36 -05001600lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1601 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05001602{
James Smarte2a8be52019-05-06 17:26:47 -07001603 if (phba->sli_rev == LPFC_SLI_REV4)
1604 lockdep_assert_held(&pring->ring_lock);
1605 else
1606 lockdep_assert_held(&phba->hbalock);
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001607
Mauricio Faria de Oliveira2319f842016-11-23 10:33:19 -02001608 BUG_ON(!piocb);
Johannes Thumshirn22466da2016-07-29 15:30:56 +02001609
dea31012005-04-17 16:05:31 -05001610 list_add_tail(&piocb->list, &pring->txcmplq);
James Smart4f2e66c2012-05-09 21:17:07 -04001611 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
James Smartc4908502019-01-28 11:14:28 -08001612 pring->txcmplq_cnt++;
James Smart2a9bf3d2010-06-07 15:24:45 -04001613
James Smart92d7f7b2007-06-17 19:56:38 -05001614 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1615 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
Mauricio Faria de Oliveira2319f842016-11-23 10:33:19 -02001616 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1617 BUG_ON(!piocb->vport);
1618 if (!(piocb->vport->load_flag & FC_UNLOADING))
1619 mod_timer(&piocb->vport->els_tmofunc,
1620 jiffies +
1621 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1622 }
dea31012005-04-17 16:05:31 -05001623
James Smart2e0fef82007-06-17 19:56:36 -05001624 return 0;
dea31012005-04-17 16:05:31 -05001625}
1626
James Smarte59058c2008-08-24 21:49:00 -04001627/**
James Smart3621a712009-04-06 18:47:14 -04001628 * lpfc_sli_ringtx_get - Get first element of the txq
James Smarte59058c2008-08-24 21:49:00 -04001629 * @phba: Pointer to HBA context object.
1630 * @pring: Pointer to driver SLI ring object.
1631 *
1632 * This function is called with hbalock held to get next
1633 * iocb in txq of the given ring. If there is any iocb in
1634 * the txq, the function returns first iocb in the list after
1635 * removing the iocb from the list, else it returns NULL.
1636 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04001637struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05001638lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001639{
dea31012005-04-17 16:05:31 -05001640 struct lpfc_iocbq *cmd_iocb;
1641
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001642 lockdep_assert_held(&phba->hbalock);
1643
James Smart858c9f62007-06-17 19:56:39 -05001644 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
James Smart2e0fef82007-06-17 19:56:36 -05001645 return cmd_iocb;
dea31012005-04-17 16:05:31 -05001646}
1647
James Smarte59058c2008-08-24 21:49:00 -04001648/**
James Smart3621a712009-04-06 18:47:14 -04001649 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
James Smarte59058c2008-08-24 21:49:00 -04001650 * @phba: Pointer to HBA context object.
1651 * @pring: Pointer to driver SLI ring object.
1652 *
1653 * This function is called with hbalock held and the caller must post the
1654 * iocb without releasing the lock. If the caller releases the lock,
1655 * iocb slot returned by the function is not guaranteed to be available.
1656 * The function returns pointer to the next available iocb slot if there
1657 * is available slot in the ring, else it returns NULL.
1658 * If the get index of the ring is ahead of the put index, the function
1659 * will post an error attention event to the worker thread to take the
1660 * HBA to offline state.
1661 **/
dea31012005-04-17 16:05:31 -05001662static IOCB_t *
1663lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1664{
James Smart34b02dc2008-08-24 21:49:55 -04001665 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James Smart7e56aa22012-08-03 12:35:34 -04001666 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001667
1668 lockdep_assert_held(&phba->hbalock);
1669
James Smart7e56aa22012-08-03 12:35:34 -04001670 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1671 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1672 pring->sli.sli3.next_cmdidx = 0;
dea31012005-04-17 16:05:31 -05001673
James Smart7e56aa22012-08-03 12:35:34 -04001674 if (unlikely(pring->sli.sli3.local_getidx ==
1675 pring->sli.sli3.next_cmdidx)) {
dea31012005-04-17 16:05:31 -05001676
James Smart7e56aa22012-08-03 12:35:34 -04001677 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05001678
James Smart7e56aa22012-08-03 12:35:34 -04001679 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
dea31012005-04-17 16:05:31 -05001680 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001681 "0315 Ring %d issue: portCmdGet %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02001682 "is bigger than cmd ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001683 pring->ringno,
James Smart7e56aa22012-08-03 12:35:34 -04001684 pring->sli.sli3.local_getidx,
1685 max_cmd_idx);
dea31012005-04-17 16:05:31 -05001686
James Smart2e0fef82007-06-17 19:56:36 -05001687 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05001688 /*
1689 * All error attention handlers are posted to
1690 * worker thread
1691 */
1692 phba->work_ha |= HA_ERATT;
1693 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05001694
James Smart5e9d9b82008-06-14 22:52:53 -04001695 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -05001696
1697 return NULL;
1698 }
1699
James Smart7e56aa22012-08-03 12:35:34 -04001700 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
dea31012005-04-17 16:05:31 -05001701 return NULL;
1702 }
1703
James Smarted957682007-06-17 19:56:37 -05001704 return lpfc_cmd_iocb(phba, pring);
dea31012005-04-17 16:05:31 -05001705}
1706
James Smarte59058c2008-08-24 21:49:00 -04001707/**
James Smart3621a712009-04-06 18:47:14 -04001708 * lpfc_sli_next_iotag - Get an iotag for the iocb
James Smarte59058c2008-08-24 21:49:00 -04001709 * @phba: Pointer to HBA context object.
1710 * @iocbq: Pointer to driver iocb object.
1711 *
1712 * This function gets an iotag for the iocb. If there is no unused iotag and
1713 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1714 * array and assigns a new iotag.
1715 * The function returns the allocated iotag if successful, else returns zero.
1716 * Zero is not a valid iotag.
1717 * The caller is not required to hold any lock.
1718 **/
James Bottomley604a3e32005-10-29 10:28:33 -05001719uint16_t
James Smart2e0fef82007-06-17 19:56:36 -05001720lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea31012005-04-17 16:05:31 -05001721{
James Smart2e0fef82007-06-17 19:56:36 -05001722 struct lpfc_iocbq **new_arr;
1723 struct lpfc_iocbq **old_arr;
James Bottomley604a3e32005-10-29 10:28:33 -05001724 size_t new_len;
1725 struct lpfc_sli *psli = &phba->sli;
1726 uint16_t iotag;
dea31012005-04-17 16:05:31 -05001727
James Smart2e0fef82007-06-17 19:56:36 -05001728 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001729 iotag = psli->last_iotag;
1730 if(++iotag < psli->iocbq_lookup_len) {
1731 psli->last_iotag = iotag;
1732 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001733 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001734 iocbq->iotag = iotag;
1735 return iotag;
James Smart2e0fef82007-06-17 19:56:36 -05001736 } else if (psli->iocbq_lookup_len < (0xffff
James Bottomley604a3e32005-10-29 10:28:33 -05001737 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1738 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
James Smart2e0fef82007-06-17 19:56:36 -05001739 spin_unlock_irq(&phba->hbalock);
Kees Cook6396bb22018-06-12 14:03:40 -07001740 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
James Bottomley604a3e32005-10-29 10:28:33 -05001741 GFP_KERNEL);
1742 if (new_arr) {
James Smart2e0fef82007-06-17 19:56:36 -05001743 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001744 old_arr = psli->iocbq_lookup;
1745 if (new_len <= psli->iocbq_lookup_len) {
1746 /* highly unprobable case */
1747 kfree(new_arr);
1748 iotag = psli->last_iotag;
1749 if(++iotag < psli->iocbq_lookup_len) {
1750 psli->last_iotag = iotag;
1751 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001752 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001753 iocbq->iotag = iotag;
1754 return iotag;
1755 }
James Smart2e0fef82007-06-17 19:56:36 -05001756 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001757 return 0;
1758 }
1759 if (psli->iocbq_lookup)
1760 memcpy(new_arr, old_arr,
1761 ((psli->last_iotag + 1) *
James Smart311464e2007-08-02 11:10:37 -04001762 sizeof (struct lpfc_iocbq *)));
James Bottomley604a3e32005-10-29 10:28:33 -05001763 psli->iocbq_lookup = new_arr;
1764 psli->iocbq_lookup_len = new_len;
1765 psli->last_iotag = iotag;
1766 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001767 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001768 iocbq->iotag = iotag;
1769 kfree(old_arr);
1770 return iotag;
1771 }
James Smart8f6d98d2006-08-01 07:34:00 -04001772 } else
James Smart2e0fef82007-06-17 19:56:36 -05001773 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05001774
James Smartbc739052010-08-04 16:11:18 -04001775 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001776 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1777 psli->last_iotag);
dea31012005-04-17 16:05:31 -05001778
James Bottomley604a3e32005-10-29 10:28:33 -05001779 return 0;
dea31012005-04-17 16:05:31 -05001780}
1781
James Smarte59058c2008-08-24 21:49:00 -04001782/**
James Smart3621a712009-04-06 18:47:14 -04001783 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
James Smarte59058c2008-08-24 21:49:00 -04001784 * @phba: Pointer to HBA context object.
1785 * @pring: Pointer to driver SLI ring object.
1786 * @iocb: Pointer to iocb slot in the ring.
1787 * @nextiocb: Pointer to driver iocb object which need to be
1788 * posted to firmware.
1789 *
Dick Kennedy88acb4d2020-05-01 14:43:07 -07001790 * This function is called to post a new iocb to the firmware. This
1791 * function copies the new iocb to ring iocb slot and updates the
1792 * ring pointers. It adds the new iocb to txcmplq if there is
James Smarte59058c2008-08-24 21:49:00 -04001793 * a completion call back for this iocb else the function will free the
Dick Kennedy88acb4d2020-05-01 14:43:07 -07001794 * iocb object. The hbalock is asserted held in the code path calling
1795 * this routine.
James Smarte59058c2008-08-24 21:49:00 -04001796 **/
dea31012005-04-17 16:05:31 -05001797static void
1798lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1799 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1800{
1801 /*
James Bottomley604a3e32005-10-29 10:28:33 -05001802 * Set up an iotag
dea31012005-04-17 16:05:31 -05001803 */
James Bottomley604a3e32005-10-29 10:28:33 -05001804 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea31012005-04-17 16:05:31 -05001805
James Smarte2a0a9d2008-12-04 22:40:02 -05001806
James Smarta58cbd52007-08-02 11:09:43 -04001807 if (pring->ringno == LPFC_ELS_RING) {
1808 lpfc_debugfs_slow_ring_trc(phba,
1809 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1810 *(((uint32_t *) &nextiocb->iocb) + 4),
1811 *(((uint32_t *) &nextiocb->iocb) + 6),
1812 *(((uint32_t *) &nextiocb->iocb) + 7));
1813 }
1814
dea31012005-04-17 16:05:31 -05001815 /*
1816 * Issue iocb command to adapter
1817 */
James Smart92d7f7b2007-06-17 19:56:38 -05001818 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea31012005-04-17 16:05:31 -05001819 wmb();
1820 pring->stats.iocb_cmd++;
1821
1822 /*
1823 * If there is no completion routine to call, we can release the
1824 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1825 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1826 */
1827 if (nextiocb->iocb_cmpl)
1828 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
James Bottomley604a3e32005-10-29 10:28:33 -05001829 else
James Smart2e0fef82007-06-17 19:56:36 -05001830 __lpfc_sli_release_iocbq(phba, nextiocb);
dea31012005-04-17 16:05:31 -05001831
1832 /*
1833 * Let the HBA know what IOCB slot will be the next one the
1834 * driver will put a command into.
1835 */
James Smart7e56aa22012-08-03 12:35:34 -04001836 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1837 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea31012005-04-17 16:05:31 -05001838}
1839
James Smarte59058c2008-08-24 21:49:00 -04001840/**
James Smart3621a712009-04-06 18:47:14 -04001841 * lpfc_sli_update_full_ring - Update the chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001842 * @phba: Pointer to HBA context object.
1843 * @pring: Pointer to driver SLI ring object.
1844 *
1845 * The caller is not required to hold any lock for calling this function.
1846 * This function updates the chip attention bits for the ring to inform firmware
1847 * that there are pending work to be done for this ring and requests an
1848 * interrupt when there is space available in the ring. This function is
1849 * called when the driver is unable to post more iocbs to the ring due
1850 * to unavailability of space in the ring.
1851 **/
dea31012005-04-17 16:05:31 -05001852static void
James Smart2e0fef82007-06-17 19:56:36 -05001853lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001854{
1855 int ringno = pring->ringno;
1856
1857 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1858
1859 wmb();
1860
1861 /*
1862 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1863 * The HBA will tell us when an IOCB entry is available.
1864 */
1865 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1866 readl(phba->CAregaddr); /* flush */
1867
1868 pring->stats.iocb_cmd_full++;
1869}
1870
James Smarte59058c2008-08-24 21:49:00 -04001871/**
James Smart3621a712009-04-06 18:47:14 -04001872 * lpfc_sli_update_ring - Update chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001873 * @phba: Pointer to HBA context object.
1874 * @pring: Pointer to driver SLI ring object.
1875 *
1876 * This function updates the chip attention register bit for the
1877 * given ring to inform HBA that there is more work to be done
1878 * in this ring. The caller is not required to hold any lock.
1879 **/
dea31012005-04-17 16:05:31 -05001880static void
James Smart2e0fef82007-06-17 19:56:36 -05001881lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001882{
1883 int ringno = pring->ringno;
1884
1885 /*
1886 * Tell the HBA that there is work to do in this ring.
1887 */
James Smart34b02dc2008-08-24 21:49:55 -04001888 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1889 wmb();
1890 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1891 readl(phba->CAregaddr); /* flush */
1892 }
dea31012005-04-17 16:05:31 -05001893}
1894
James Smarte59058c2008-08-24 21:49:00 -04001895/**
James Smart3621a712009-04-06 18:47:14 -04001896 * lpfc_sli_resume_iocb - Process iocbs in the txq
James Smarte59058c2008-08-24 21:49:00 -04001897 * @phba: Pointer to HBA context object.
1898 * @pring: Pointer to driver SLI ring object.
1899 *
1900 * This function is called with hbalock held to post pending iocbs
1901 * in the txq to the firmware. This function is called when driver
1902 * detects space available in the ring.
1903 **/
dea31012005-04-17 16:05:31 -05001904static void
James Smart2e0fef82007-06-17 19:56:36 -05001905lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001906{
1907 IOCB_t *iocb;
1908 struct lpfc_iocbq *nextiocb;
1909
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001910 lockdep_assert_held(&phba->hbalock);
1911
dea31012005-04-17 16:05:31 -05001912 /*
1913 * Check to see if:
1914 * (a) there is anything on the txq to send
1915 * (b) link is up
1916 * (c) link attention events can be processed (fcp ring only)
1917 * (d) IOCB processing is not blocked by the outstanding mbox command.
1918 */
James Smart0e9bb8d2013-03-01 16:35:12 -05001919
1920 if (lpfc_is_link_up(phba) &&
1921 (!list_empty(&pring->txq)) &&
James Smart895427b2017-02-12 13:52:30 -08001922 (pring->ringno != LPFC_FCP_RING ||
James Smart0b727fe2007-10-27 13:37:25 -04001923 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea31012005-04-17 16:05:31 -05001924
1925 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1926 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1927 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1928
1929 if (iocb)
1930 lpfc_sli_update_ring(phba, pring);
1931 else
1932 lpfc_sli_update_full_ring(phba, pring);
1933 }
1934
1935 return;
1936}
1937
James Smarte59058c2008-08-24 21:49:00 -04001938/**
James Smart3621a712009-04-06 18:47:14 -04001939 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
James Smarte59058c2008-08-24 21:49:00 -04001940 * @phba: Pointer to HBA context object.
1941 * @hbqno: HBQ number.
1942 *
1943 * This function is called with hbalock held to get the next
1944 * available slot for the given HBQ. If there is free slot
1945 * available for the HBQ it will return pointer to the next available
1946 * HBQ entry else it will return NULL.
1947 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001948static struct lpfc_hbq_entry *
James Smarted957682007-06-17 19:56:37 -05001949lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1950{
1951 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1952
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001953 lockdep_assert_held(&phba->hbalock);
1954
James Smarted957682007-06-17 19:56:37 -05001955 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1956 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1957 hbqp->next_hbqPutIdx = 0;
1958
1959 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
James Smart92d7f7b2007-06-17 19:56:38 -05001960 uint32_t raw_index = phba->hbq_get[hbqno];
James Smarted957682007-06-17 19:56:37 -05001961 uint32_t getidx = le32_to_cpu(raw_index);
1962
1963 hbqp->local_hbqGetIdx = getidx;
1964
1965 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1966 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05001967 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04001968 "1802 HBQ %d: local_hbqGetIdx "
James Smarted957682007-06-17 19:56:37 -05001969 "%u is > than hbqp->entry_count %u\n",
James Smarte8b62012007-08-02 11:10:09 -04001970 hbqno, hbqp->local_hbqGetIdx,
James Smarted957682007-06-17 19:56:37 -05001971 hbqp->entry_count);
1972
1973 phba->link_state = LPFC_HBA_ERROR;
1974 return NULL;
1975 }
1976
1977 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1978 return NULL;
1979 }
1980
James Smart51ef4c22007-08-02 11:10:31 -04001981 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1982 hbqp->hbqPutIdx;
James Smarted957682007-06-17 19:56:37 -05001983}
1984
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		/* _safe iteration: the free callback below removes the
		 * current entry from the list via list_del().
		 */
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use; done under hbalock so posters that
	 * test hbq_in_use (e.g. lpfc_sli_hbqbuf_fill_hbqs) cannot race
	 * a new buffer onto a list we just emptied.
	 */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
2019
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. It delegates to the SLI-revision
 * specific handler installed in phba (the _s3 or _s4 variant
 * below). Returns zero if the buffer was successfully posted,
 * otherwise a nonzero error value.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	/* Indirect call to lpfc_sli_hbq_to_firmware_s3/_s4 */
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
2039
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully post the buffer else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		/* Fill in the buffer descriptor for this slot.  The
		 * le32_to_cpu() calls here mirror the original code; the
		 * entry is built in the endianness the firmware expects.
		 */
		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM: advance the put index only after the entry is
		 * fully written, then tell the adapter.
		 */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush the posted write so the adapter sees it now */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
2080
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	/* On SLI4 only the ELS HBQ is backed by the hdr/dat RQ pair;
	 * any other HBQ number is rejected (nonzero = failure).
	 */
	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	/* Header RQE points at the header DMA buffer, data RQE at the
	 * payload DMA buffer of the same hbq_dmabuf.
	 */
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	/* On success rc is the RQ index used; encode it with the HBQ
	 * number (high 16 bits) so the buffer can be found again by tag.
	 */
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
2118
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,	/* max HBQ entries (see fill_hbqs cap) */
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,	/* none posted yet at init */
	.init_count = 40,	/* buffers posted at initialization */
	.add_count = 40,	/* buffers added per replenish call */
};
James Smarted957682007-06-17 19:56:37 -05002130
/* Array of HBQs, indexed by HBQ number (only the ELS/CT HBQ is defined;
 * presumably LPFC_ELS_HBQ is index 0 — confirm against lpfc headers).
 */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
2135
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	/* Clamp count so the HBQ never holds more than entry_count buffers */
	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries outside the lock (allocation may sleep or
	 * be slow); collect them on a private list first.
	 */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		/* Tag = running buffer count in low bits, HBQ number in
		 * the high 16 bits (see lpfc_sli_hbqbuf_find).
		 */
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	/* HBQ torn down while we were allocating: release everything */
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
2195
James Smarte59058c2008-08-24 21:49:00 -04002196/**
James Smart3621a712009-04-06 18:47:14 -04002197 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
James Smarte59058c2008-08-24 21:49:00 -04002198 * @phba: Pointer to HBA context object.
2199 * @qno: HBQ number.
2200 *
2201 * This function posts more buffers to the HBQ. This function
James Smartd7c255b2008-08-24 21:50:00 -04002202 * is called with no lock held. The function returns the number of HBQ entries
2203 * successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04002204 **/
James Smarted957682007-06-17 19:56:37 -05002205int
James Smart92d7f7b2007-06-17 19:56:38 -05002206lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05002207{
James Smartdef9c7a2009-12-21 17:02:28 -05002208 if (phba->sli_rev == LPFC_SLI_REV4)
2209 return 0;
2210 else
2211 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2212 lpfc_hbq_defs[qno]->add_count);
James Smarted957682007-06-17 19:56:37 -05002213}
2214
James Smarte59058c2008-08-24 21:49:00 -04002215/**
James Smart3621a712009-04-06 18:47:14 -04002216 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
James Smarte59058c2008-08-24 21:49:00 -04002217 * @phba: Pointer to HBA context object.
2218 * @qno: HBQ queue number.
2219 *
2220 * This function is called from SLI initialization code path with
2221 * no lock held to post initial HBQ buffers to firmware. The
James Smartd7c255b2008-08-24 21:50:00 -04002222 * function returns the number of HBQ entries successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04002223 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002224static int
James Smart92d7f7b2007-06-17 19:56:38 -05002225lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05002226{
James Smartdef9c7a2009-12-21 17:02:28 -05002227 if (phba->sli_rev == LPFC_SLI_REV4)
2228 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
James Smart73d91e52011-10-10 21:32:10 -04002229 lpfc_hbq_defs[qno]->entry_count);
James Smartdef9c7a2009-12-21 17:02:28 -05002230 else
2231 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2232 lpfc_hbq_defs[qno]->init_count);
James Smarted957682007-06-17 19:56:37 -05002233}
2234
James Smarte59058c2008-08-24 21:49:00 -04002235/**
James Smart3772a992009-05-22 14:50:54 -04002236 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2237 * @phba: Pointer to HBA context object.
2238 * @hbqno: HBQ number.
2239 *
2240 * This function removes the first hbq buffer on an hbq list and returns a
2241 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2242 **/
2243static struct hbq_dmabuf *
2244lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2245{
2246 struct lpfc_dmabuf *d_buf;
2247
2248 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2249 if (!d_buf)
2250 return NULL;
2251 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2252}
2253
2254/**
James Smart2d7dbc42017-02-12 13:52:35 -08002255 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2256 * @phba: Pointer to HBA context object.
2257 * @hbqno: HBQ number.
2258 *
2259 * This function removes the first RQ buffer on an RQ buffer list and returns a
2260 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2261 **/
2262static struct rqb_dmabuf *
2263lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2264{
2265 struct lpfc_dmabuf *h_buf;
2266 struct lpfc_rqb *rqbp;
2267
2268 rqbp = hrq->rqbp;
2269 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2270 struct lpfc_dmabuf, list);
2271 if (!h_buf)
2272 return NULL;
2273 rqbp->buffer_count--;
2274 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2275}
2276
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer (HBQ number in the high 16 bits).
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	/* High 16 bits of the tag select the HBQ (see fill_hbqs) */
	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			/* Drop the lock before returning the match */
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
2311
James Smarte59058c2008-08-24 21:49:00 -04002312/**
James Smart3621a712009-04-06 18:47:14 -04002313 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04002314 * @phba: Pointer to HBA context object.
2315 * @hbq_buffer: Pointer to HBQ buffer.
2316 *
2317 * This function is called with hbalock. This function gives back
2318 * the hbq buffer to firmware. If the HBQ does not have space to
2319 * post the buffer, it will free the buffer.
2320 **/
James Smarted957682007-06-17 19:56:37 -05002321void
James Smart51ef4c22007-08-02 11:10:31 -04002322lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
James Smarted957682007-06-17 19:56:37 -05002323{
2324 uint32_t hbqno;
2325
James Smart51ef4c22007-08-02 11:10:31 -04002326 if (hbq_buffer) {
2327 hbqno = hbq_buffer->tag >> 16;
James Smart3772a992009-05-22 14:50:54 -04002328 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
James Smart51ef4c22007-08-02 11:10:31 -04002329 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smarted957682007-06-17 19:56:37 -05002330 }
2331}
2332
James Smarte59058c2008-08-24 21:49:00 -04002333/**
James Smart3621a712009-04-06 18:47:14 -04002334 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
James Smarte59058c2008-08-24 21:49:00 -04002335 * @mbxCommand: mailbox command code.
2336 *
2337 * This function is called by the mailbox event handler function to verify
2338 * that the completed mailbox command is a legitimate mailbox command. If the
2339 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2340 * and the mailbox event handler will take the HBA offline.
2341 **/
dea31012005-04-17 16:05:31 -05002342static int
2343lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2344{
2345 uint8_t ret;
2346
2347 switch (mbxCommand) {
2348 case MBX_LOAD_SM:
2349 case MBX_READ_NV:
2350 case MBX_WRITE_NV:
James Smarta8adb832007-10-27 13:37:53 -04002351 case MBX_WRITE_VPARMS:
dea31012005-04-17 16:05:31 -05002352 case MBX_RUN_BIU_DIAG:
2353 case MBX_INIT_LINK:
2354 case MBX_DOWN_LINK:
2355 case MBX_CONFIG_LINK:
2356 case MBX_CONFIG_RING:
2357 case MBX_RESET_RING:
2358 case MBX_READ_CONFIG:
2359 case MBX_READ_RCONFIG:
2360 case MBX_READ_SPARM:
2361 case MBX_READ_STATUS:
2362 case MBX_READ_RPI:
2363 case MBX_READ_XRI:
2364 case MBX_READ_REV:
2365 case MBX_READ_LNK_STAT:
2366 case MBX_REG_LOGIN:
2367 case MBX_UNREG_LOGIN:
dea31012005-04-17 16:05:31 -05002368 case MBX_CLEAR_LA:
2369 case MBX_DUMP_MEMORY:
2370 case MBX_DUMP_CONTEXT:
2371 case MBX_RUN_DIAGS:
2372 case MBX_RESTART:
2373 case MBX_UPDATE_CFG:
2374 case MBX_DOWN_LOAD:
2375 case MBX_DEL_LD_ENTRY:
2376 case MBX_RUN_PROGRAM:
2377 case MBX_SET_MASK:
James Smart09372822008-01-11 01:52:54 -05002378 case MBX_SET_VARIABLE:
dea31012005-04-17 16:05:31 -05002379 case MBX_UNREG_D_ID:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002380 case MBX_KILL_BOARD:
dea31012005-04-17 16:05:31 -05002381 case MBX_CONFIG_FARP:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002382 case MBX_BEACON:
dea31012005-04-17 16:05:31 -05002383 case MBX_LOAD_AREA:
2384 case MBX_RUN_BIU_DIAG64:
2385 case MBX_CONFIG_PORT:
2386 case MBX_READ_SPARM64:
2387 case MBX_READ_RPI64:
2388 case MBX_REG_LOGIN64:
James Smart76a95d72010-11-20 23:11:48 -05002389 case MBX_READ_TOPOLOGY:
James Smart09372822008-01-11 01:52:54 -05002390 case MBX_WRITE_WWN:
dea31012005-04-17 16:05:31 -05002391 case MBX_SET_DEBUG:
2392 case MBX_LOAD_EXP_ROM:
James Smart57127f12007-10-27 13:37:05 -04002393 case MBX_ASYNCEVT_ENABLE:
James Smart92d7f7b2007-06-17 19:56:38 -05002394 case MBX_REG_VPI:
2395 case MBX_UNREG_VPI:
James Smart858c9f62007-06-17 19:56:39 -05002396 case MBX_HEARTBEAT:
James Smart84774a42008-08-24 21:50:06 -04002397 case MBX_PORT_CAPABILITIES:
2398 case MBX_PORT_IOV_CONTROL:
James Smart04c68492009-05-22 14:52:52 -04002399 case MBX_SLI4_CONFIG:
2400 case MBX_SLI4_REQ_FTRS:
2401 case MBX_REG_FCFI:
2402 case MBX_UNREG_FCFI:
2403 case MBX_REG_VFI:
2404 case MBX_UNREG_VFI:
2405 case MBX_INIT_VPI:
2406 case MBX_INIT_VFI:
2407 case MBX_RESUME_RPI:
James Smartc7495932010-04-06 15:05:28 -04002408 case MBX_READ_EVENT_LOG_STATUS:
2409 case MBX_READ_EVENT_LOG:
James Smartdcf2a4e2010-09-29 11:18:53 -04002410 case MBX_SECURITY_MGMT:
2411 case MBX_AUTH_PORT:
James Smart940eb682012-08-03 12:37:08 -04002412 case MBX_ACCESS_VDATA:
dea31012005-04-17 16:05:31 -05002413 ret = mbxCommand;
2414 break;
2415 default:
2416 ret = MBX_SHUTDOWN;
2417 break;
2418 }
James Smart2e0fef82007-06-17 19:56:36 -05002419 return ret;
dea31012005-04-17 16:05:31 -05002420}
James Smarte59058c2008-08-24 21:49:00 -04002421
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context3
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	/* context3 is read and completed under hbalock so the waiter
	 * cannot clear it between our check and the complete() call.
	 */
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
2451
/**
 * __lpfc_sli_rpi_release - Release a node's RPI if flagged for release
 * @vport: Pointer to the virtual port object.
 * @ndlp: Pointer to the node list entry.
 *
 * If NLP_RELEASE_RPI is set on @ndlp, frees the node's RPI back to the
 * SLI4 pool and marks the node's rpi invalid (the flag/rpi update is done
 * under ndlp_lock). In all cases the NLP_UNREG_INP flag is cleared.
 **/
static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
James Smarte59058c2008-08-24 21:49:00 -04002466
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	/* Free the DMA buffer attached to the mailbox, if any */
	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		if (phba->sli_rev == LPFC_SLI_REV4)
			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
		/* Reuse this mailbox for the UNREG_LOGIN; if it was
		 * issued, this completion handler runs again for it and
		 * we must not free pmb below — hence the early return.
		 */
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	/* Successful REG_VPI: record the registered state on the vport */
	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	/* Drop the node reference taken when REG_LOGIN64 was issued */
	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
		pmb->ctx_buf = NULL;
		pmb->ctx_ndlp = NULL;
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x %px\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				/* A PLOGI was deferred until the UNREG
				 * completed; issue it now.
				 */
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				__lpfc_sli_rpi_release(vport, ndlp);
			}
			if (vport->load_flag & FC_UNLOADING)
				lpfc_nlp_put(ndlp);
			pmb->ctx_ndlp = NULL;
		}
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	/* SLI4_CONFIG mailboxes carry extra resources; free accordingly */
	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
 /**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes, this routine puts the
 * reference back.
 *
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		/* Only SLI4 if_type >= 2 adapters take this path */
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					"0010 UNREG_LOGIN vpi:%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"map:%x %px\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				/* Return the reference held across the
				 * unreg (see function header).
				 */
				lpfc_nlp_put(ndlp);

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				    NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					/* Re-issue the deferred PLOGI */
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}
dea31012005-04-17 16:05:31 -05002634
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. The interrupt service routine
 * processes the mailbox completion interrupt and adds completed mailbox
 * commands to the mboxq_cmpl queue, then signals the worker thread. The
 * worker thread calls lpfc_sli_handle_mb_event, which returns the completed
 * mailbox commands in the mboxq_cmpl queue to the upper layers by calling
 * the completion handler function of each mailbox.
 *
 * Return: always 0.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the local cmplq; hbalock
	 * protects mboxq_cmpl against the interrupt-context producer.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Drain cmplq one mailbox at a time until it is empty */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		/* Trace every completion except the periodic heartbeat */
		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if unknown mbox command completion.
		 * Flag a host error and hand off to error attention handling.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing; the command
				 * is reissued as-is and skipped below unless
				 * the reissue itself fails immediately.
				 */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport ? pmb->vport->port_state :
					LPFC_VPORT_UNKNOWN);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		/* Hand the mailbox back to its owner; the completion handler
		 * is responsible for freeing it when one is registered.
		 */
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba,pmb);
	} while (1);
	return 0;
}
James Smart92d7f7b2007-06-17 19:56:38 -05002768
James Smarte59058c2008-08-24 21:49:00 -04002769/**
James Smart3621a712009-04-06 18:47:14 -04002770 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
James Smarte59058c2008-08-24 21:49:00 -04002771 * @phba: Pointer to HBA context object.
2772 * @pring: Pointer to driver SLI ring object.
2773 * @tag: buffer tag.
2774 *
2775 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2776 * is set in the tag the buffer is posted for a particular exchange,
2777 * the function will return the buffer without replacing the buffer.
2778 * If the buffer is for unsolicited ELS or CT traffic, this function
2779 * returns the buffer and also posts another buffer to the firmware.
2780 **/
James Smart76bb24e2007-10-27 13:38:00 -04002781static struct lpfc_dmabuf *
2782lpfc_sli_get_buff(struct lpfc_hba *phba,
James Smart9f1e1b52008-12-04 22:39:40 -05002783 struct lpfc_sli_ring *pring,
2784 uint32_t tag)
James Smart76bb24e2007-10-27 13:38:00 -04002785{
James Smart9f1e1b52008-12-04 22:39:40 -05002786 struct hbq_dmabuf *hbq_entry;
2787
James Smart76bb24e2007-10-27 13:38:00 -04002788 if (tag & QUE_BUFTAG_BIT)
2789 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
James Smart9f1e1b52008-12-04 22:39:40 -05002790 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2791 if (!hbq_entry)
2792 return NULL;
2793 return &hbq_entry->dbuf;
James Smart76bb24e2007-10-27 13:38:00 -04002794}
James Smart57127f12007-10-27 13:37:05 -04002795
James Smart3772a992009-05-22 14:50:54 -04002796/**
2797 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2798 * @phba: Pointer to HBA context object.
2799 * @pring: Pointer to driver SLI ring object.
2800 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2801 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2802 * @fch_type: the type for the first frame of the sequence.
2803 *
2804 * This function is called with no lock held. This function uses the r_ctl and
2805 * type of the received sequence to find the correct callback function to call
2806 * to process the sequence.
2807 **/
2808static int
2809lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2810 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2811 uint32_t fch_type)
2812{
2813 int i;
2814
James Smartf358dd02017-02-12 13:52:34 -08002815 switch (fch_type) {
2816 case FC_TYPE_NVME:
James Smartd613b6a2017-02-12 13:52:37 -08002817 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
James Smartf358dd02017-02-12 13:52:34 -08002818 return 1;
2819 default:
2820 break;
2821 }
2822
James Smart3772a992009-05-22 14:50:54 -04002823 /* unSolicited Responses */
2824 if (pring->prt[0].profile) {
2825 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2826 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2827 saveq);
2828 return 1;
2829 }
2830 /* We must search, based on rctl / type
2831 for the right routine */
2832 for (i = 0; i < pring->num_mask; i++) {
2833 if ((pring->prt[i].rctl == fch_r_ctl) &&
2834 (pring->prt[i].type == fch_type)) {
2835 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2836 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2837 (phba, pring, saveq);
2838 return 1;
2839 }
2840 }
2841 return 0;
2842}
James Smarte59058c2008-08-24 21:49:00 -04002843
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 *
 * Return: 1 when the iocb (and its buffers) have been fully consumed here;
 * 0 when the iocb was queued on the continuation list awaiting more frames.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t * irsp;
	WORD5 * w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	/* Async status events have their own ring-level handler */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* RET_XRI: firmware is returning posted HBQ buffers; free each
	 * BDE that was attached (up to three per iocb).
	 */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/* HBQ mode: resolve buffer tags into dmabufs for the head iocb and
	 * every chained continuation iocb (context2 = first BDE,
	 * context3 = second BDE).
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/* Multi-frame sequence: chain this iocb onto the matching entry of
	 * the continuation save queue (matched by OX_ID). Only when the
	 * final frame (not IOSTAT_INTERMED_RSP) arrives is the assembled
	 * sequence dequeued and processed below.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
			    saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	/* Derive R_CTL/TYPE for dispatch: ELS receive commands imply
	 * ELS_REQ/ELS; otherwise take them from word 5 of the iocb.
	 */
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround: a zero Rctl on the ELS ring for
		 * receive-sequence commands is rewritten to ELS_REQ/ELS.
		 */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
3015
James Smarte59058c2008-08-24 21:49:00 -04003016/**
James Smart3621a712009-04-06 18:47:14 -04003017 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
James Smarte59058c2008-08-24 21:49:00 -04003018 * @phba: Pointer to HBA context object.
3019 * @pring: Pointer to driver SLI ring object.
3020 * @prspiocb: Pointer to response iocb object.
3021 *
3022 * This function looks up the iocb_lookup table to get the command iocb
3023 * corresponding to the given response iocb using the iotag of the
James Smarte2a8be52019-05-06 17:26:47 -07003024 * response iocb. The driver calls this function with the hbalock held
3025 * for SLI3 ports or the ring lock held for SLI4 ports.
James Smarte59058c2008-08-24 21:49:00 -04003026 * This function returns the command iocb object if it finds the command
3027 * iocb else returns NULL.
3028 **/
dea31012005-04-17 16:05:31 -05003029static struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05003030lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3031 struct lpfc_sli_ring *pring,
3032 struct lpfc_iocbq *prspiocb)
dea31012005-04-17 16:05:31 -05003033{
dea31012005-04-17 16:05:31 -05003034 struct lpfc_iocbq *cmd_iocb = NULL;
3035 uint16_t iotag;
James Smarte2a8be52019-05-06 17:26:47 -07003036 spinlock_t *temp_lock = NULL;
3037 unsigned long iflag = 0;
dea31012005-04-17 16:05:31 -05003038
James Smarte2a8be52019-05-06 17:26:47 -07003039 if (phba->sli_rev == LPFC_SLI_REV4)
3040 temp_lock = &pring->ring_lock;
3041 else
3042 temp_lock = &phba->hbalock;
3043
3044 spin_lock_irqsave(temp_lock, iflag);
James Bottomley604a3e32005-10-29 10:28:33 -05003045 iotag = prspiocb->iocb.ulpIoTag;
dea31012005-04-17 16:05:31 -05003046
James Bottomley604a3e32005-10-29 10:28:33 -05003047 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3048 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart4f2e66c2012-05-09 21:17:07 -04003049 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
James Smart89533e92016-10-13 15:06:15 -07003050 /* remove from txcmpl queue list */
3051 list_del_init(&cmd_iocb->list);
James Smart4f2e66c2012-05-09 21:17:07 -04003052 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smartc4908502019-01-28 11:14:28 -08003053 pring->txcmplq_cnt--;
James Smarte2a8be52019-05-06 17:26:47 -07003054 spin_unlock_irqrestore(temp_lock, iflag);
James Smart89533e92016-10-13 15:06:15 -07003055 return cmd_iocb;
James Smart2a9bf3d2010-06-07 15:24:45 -04003056 }
dea31012005-04-17 16:05:31 -05003057 }
3058
James Smarte2a8be52019-05-06 17:26:47 -07003059 spin_unlock_irqrestore(temp_lock, iflag);
dea31012005-04-17 16:05:31 -05003060 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart89533e92016-10-13 15:06:15 -07003061 "0317 iotag x%x is out of "
James Bottomley604a3e32005-10-29 10:28:33 -05003062 "range: max iotag x%x wd0 x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003063 iotag, phba->sli.last_iotag,
James Bottomley604a3e32005-10-29 10:28:33 -05003064 *(((uint32_t *) &prspiocb->iocb) + 7));
dea31012005-04-17 16:05:31 -05003065 return NULL;
3066}
3067
James Smarte59058c2008-08-24 21:49:00 -04003068/**
James Smart3772a992009-05-22 14:50:54 -04003069 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3070 * @phba: Pointer to HBA context object.
3071 * @pring: Pointer to driver SLI ring object.
3072 * @iotag: IOCB tag.
3073 *
3074 * This function looks up the iocb_lookup table to get the command iocb
James Smarte2a8be52019-05-06 17:26:47 -07003075 * corresponding to the given iotag. The driver calls this function with
3076 * the ring lock held because this function is an SLI4 port only helper.
James Smart3772a992009-05-22 14:50:54 -04003077 * This function returns the command iocb object if it finds the command
3078 * iocb else returns NULL.
3079 **/
3080static struct lpfc_iocbq *
3081lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3082 struct lpfc_sli_ring *pring, uint16_t iotag)
3083{
James Smart895427b2017-02-12 13:52:30 -08003084 struct lpfc_iocbq *cmd_iocb = NULL;
James Smarte2a8be52019-05-06 17:26:47 -07003085 spinlock_t *temp_lock = NULL;
3086 unsigned long iflag = 0;
James Smart3772a992009-05-22 14:50:54 -04003087
James Smarte2a8be52019-05-06 17:26:47 -07003088 if (phba->sli_rev == LPFC_SLI_REV4)
3089 temp_lock = &pring->ring_lock;
3090 else
3091 temp_lock = &phba->hbalock;
3092
3093 spin_lock_irqsave(temp_lock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003094 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3095 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart4f2e66c2012-05-09 21:17:07 -04003096 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3097 /* remove from txcmpl queue list */
3098 list_del_init(&cmd_iocb->list);
3099 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smartc4908502019-01-28 11:14:28 -08003100 pring->txcmplq_cnt--;
James Smarte2a8be52019-05-06 17:26:47 -07003101 spin_unlock_irqrestore(temp_lock, iflag);
James Smart4f2e66c2012-05-09 21:17:07 -04003102 return cmd_iocb;
James Smart2a9bf3d2010-06-07 15:24:45 -04003103 }
James Smart3772a992009-05-22 14:50:54 -04003104 }
James Smart89533e92016-10-13 15:06:15 -07003105
James Smarte2a8be52019-05-06 17:26:47 -07003106 spin_unlock_irqrestore(temp_lock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003107 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08003108 "0372 iotag x%x lookup error: max iotag (x%x) "
3109 "iocb_flag x%x\n",
3110 iotag, phba->sli.last_iotag,
3111 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
James Smart3772a992009-05-22 14:50:54 -04003112 return NULL;
3113}
3114
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 *
 * Return: always 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* The lookup also removes the command iocb from the txcmplq */
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				/* SLI3 driver-initiated abort: rewrite the
				 * response as LOCAL_REJECT/SLI_ABORTED so the
				 * completion handler sees an aborted command.
				 */
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			/* No completion handler registered: just release
			 * the command iocb back to the pool.
			 */
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}
3264
James Smarte59058c2008-08-24 21:49:00 -04003265/**
James Smart3621a712009-04-06 18:47:14 -04003266 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
James Smarte59058c2008-08-24 21:49:00 -04003267 * @phba: Pointer to HBA context object.
3268 * @pring: Pointer to driver SLI ring object.
3269 *
3270 * This function is called from the iocb ring event handlers when
3271 * put pointer is ahead of the get pointer for a ring. This function signal
3272 * an error attention condition to the worker thread and the worker
3273 * thread will transition the HBA to offline state.
3274 **/
James Smart2e0fef82007-06-17 19:56:36 -05003275static void
3276lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003277{
James Smart34b02dc2008-08-24 21:49:55 -04003278 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003279 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003280 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003281 * rsp ring <portRspMax>
3282 */
3283 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003284 "0312 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003285 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04003286 pring->ringno, le32_to_cpu(pgp->rspPutInx),
James Smart7e56aa22012-08-03 12:35:34 -04003287 pring->sli.sli3.numRiocb);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003288
James Smart2e0fef82007-06-17 19:56:36 -05003289 phba->link_state = LPFC_HBA_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003290
3291 /*
3292 * All error attention handlers are posted to
3293 * worker thread
3294 */
3295 phba->work_ha |= HA_ERATT;
3296 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05003297
James Smart5e9d9b82008-06-14 22:52:53 -04003298 lpfc_worker_wake_up(phba);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003299
3300 return;
3301}
3302
James Smarte59058c2008-08-24 21:49:00 -04003303/**
James Smart3621a712009-04-06 18:47:14 -04003304 * lpfc_poll_eratt - Error attention polling timer timeout handler
James Smart93996272008-08-24 21:50:30 -04003305 * @ptr: Pointer to address of HBA context object.
3306 *
3307 * This function is invoked by the Error Attention polling timer when the
3308 * timer times out. It will check the SLI Error Attention register for
3309 * possible attention events. If so, it will post an Error Attention event
3310 * and wake up worker thread to process it. Otherwise, it will set up the
3311 * Error Attention polling timer for the next poll.
3312 **/
Kees Cookf22eb4d2017-09-06 20:24:26 -07003313void lpfc_poll_eratt(struct timer_list *t)
James Smart93996272008-08-24 21:50:30 -04003314{
3315 struct lpfc_hba *phba;
James Smarteb016562014-09-03 12:58:06 -04003316 uint32_t eratt = 0;
James Smartaa6fbb72012-08-03 12:36:03 -04003317 uint64_t sli_intr, cnt;
James Smart93996272008-08-24 21:50:30 -04003318
Kees Cookf22eb4d2017-09-06 20:24:26 -07003319 phba = from_timer(phba, t, eratt_poll);
James Smart93996272008-08-24 21:50:30 -04003320
James Smartaa6fbb72012-08-03 12:36:03 -04003321 /* Here we will also keep track of interrupts per sec of the hba */
3322 sli_intr = phba->sli.slistat.sli_intr;
3323
3324 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3325 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3326 sli_intr);
3327 else
3328 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3329
James Smart65791f12016-07-06 12:35:56 -07003330 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3331 do_div(cnt, phba->eratt_poll_interval);
James Smartaa6fbb72012-08-03 12:36:03 -04003332 phba->sli.slistat.sli_ips = cnt;
3333
3334 phba->sli.slistat.sli_prev_intr = sli_intr;
3335
James Smart93996272008-08-24 21:50:30 -04003336 /* Check chip HA register for error event */
3337 eratt = lpfc_sli_check_eratt(phba);
3338
3339 if (eratt)
3340 /* Tell the worker thread there is work to do */
3341 lpfc_worker_wake_up(phba);
3342 else
3343 /* Restart the timer for next eratt poll */
James Smart256ec0d2013-04-17 20:14:58 -04003344 mod_timer(&phba->eratt_poll,
3345 jiffies +
James Smart65791f12016-07-06 12:35:56 -07003346 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
James Smart93996272008-08-24 21:50:30 -04003347 return;
3348}
3349
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003350
James Smarte59058c2008-08-24 21:49:00 -04003351/**
James Smart3621a712009-04-06 18:47:14 -04003352 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
James Smarte59058c2008-08-24 21:49:00 -04003353 * @phba: Pointer to HBA context object.
3354 * @pring: Pointer to driver SLI ring object.
3355 * @mask: Host attention register mask for this ring.
3356 *
3357 * This function is called from the interrupt context when there is a ring
3358 * event for the fcp ring. The caller does not hold any lock.
3359 * The function processes each response iocb in the response ring until it
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003360 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
James Smarte59058c2008-08-24 21:49:00 -04003361 * LE bit set. The function will call the completion handler of the command iocb
3362 * if the response iocb indicates a completion for a command iocb or it is
3363 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3364 * function if this is an unsolicited iocb.
dea31012005-04-17 16:05:31 -05003365 * This routine presumes LPFC_FCP_RING handling and doesn't bother
James Smart45ed1192009-10-02 15:17:02 -04003366 * to check it explicitly.
3367 */
3368int
James Smart2e0fef82007-06-17 19:56:36 -05003369lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3370 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05003371{
James Smart34b02dc2008-08-24 21:49:55 -04003372 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea31012005-04-17 16:05:31 -05003373 IOCB_t *irsp = NULL;
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003374 IOCB_t *entry = NULL;
dea31012005-04-17 16:05:31 -05003375 struct lpfc_iocbq *cmdiocbq = NULL;
3376 struct lpfc_iocbq rspiocbq;
dea31012005-04-17 16:05:31 -05003377 uint32_t status;
3378 uint32_t portRspPut, portRspMax;
3379 int rc = 1;
3380 lpfc_iocb_type type;
3381 unsigned long iflag;
3382 uint32_t rsp_cmpl = 0;
dea31012005-04-17 16:05:31 -05003383
James Smart2e0fef82007-06-17 19:56:36 -05003384 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003385 pring->stats.iocb_event++;
3386
dea31012005-04-17 16:05:31 -05003387 /*
3388 * The next available response entry should never exceed the maximum
3389 * entries. If it does, treat it as an adapter hardware error.
3390 */
James Smart7e56aa22012-08-03 12:35:34 -04003391 portRspMax = pring->sli.sli3.numRiocb;
dea31012005-04-17 16:05:31 -05003392 portRspPut = le32_to_cpu(pgp->rspPutInx);
3393 if (unlikely(portRspPut >= portRspMax)) {
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003394 lpfc_sli_rsp_pointers_error(phba, pring);
James Smart2e0fef82007-06-17 19:56:36 -05003395 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003396 return 1;
3397 }
James Smart45ed1192009-10-02 15:17:02 -04003398 if (phba->fcp_ring_in_use) {
3399 spin_unlock_irqrestore(&phba->hbalock, iflag);
3400 return 1;
3401 } else
3402 phba->fcp_ring_in_use = 1;
dea31012005-04-17 16:05:31 -05003403
3404 rmb();
James Smart7e56aa22012-08-03 12:35:34 -04003405 while (pring->sli.sli3.rspidx != portRspPut) {
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003406 /*
3407 * Fetch an entry off the ring and copy it into a local data
3408 * structure. The copy involves a byte-swap since the
3409 * network byte order and pci byte orders are different.
3410 */
James Smarted957682007-06-17 19:56:37 -05003411 entry = lpfc_resp_iocb(phba, pring);
James Smart858c9f62007-06-17 19:56:39 -05003412 phba->last_completion_time = jiffies;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003413
James Smart7e56aa22012-08-03 12:35:34 -04003414 if (++pring->sli.sli3.rspidx >= portRspMax)
3415 pring->sli.sli3.rspidx = 0;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003416
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003417 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3418 (uint32_t *) &rspiocbq.iocb,
James Smarted957682007-06-17 19:56:37 -05003419 phba->iocb_rsp_size);
James Smarta4bc3372006-12-02 13:34:16 -05003420 INIT_LIST_HEAD(&(rspiocbq.list));
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003421 irsp = &rspiocbq.iocb;
3422
dea31012005-04-17 16:05:31 -05003423 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3424 pring->stats.iocb_rsp++;
3425 rsp_cmpl++;
3426
3427 if (unlikely(irsp->ulpStatus)) {
James Smart92d7f7b2007-06-17 19:56:38 -05003428 /*
3429 * If resource errors reported from HBA, reduce
3430 * queuedepths of the SCSI device.
3431 */
3432 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
James Smarte3d2b802012-08-14 14:25:43 -04003433 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3434 IOERR_NO_RESOURCES)) {
James Smart92d7f7b2007-06-17 19:56:38 -05003435 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003436 phba->lpfc_rampdown_queue_depth(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05003437 spin_lock_irqsave(&phba->hbalock, iflag);
3438 }
3439
dea31012005-04-17 16:05:31 -05003440 /* Rsp ring <ringno> error: IOCB */
3441 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003442 "0336 Rsp Ring %d error: IOCB Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05003443 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003444 pring->ringno,
James Smart92d7f7b2007-06-17 19:56:38 -05003445 irsp->un.ulpWord[0],
3446 irsp->un.ulpWord[1],
3447 irsp->un.ulpWord[2],
3448 irsp->un.ulpWord[3],
3449 irsp->un.ulpWord[4],
3450 irsp->un.ulpWord[5],
James Smartd7c255b2008-08-24 21:50:00 -04003451 *(uint32_t *)&irsp->un1,
3452 *((uint32_t *)&irsp->un1 + 1));
dea31012005-04-17 16:05:31 -05003453 }
3454
3455 switch (type) {
3456 case LPFC_ABORT_IOCB:
3457 case LPFC_SOL_IOCB:
3458 /*
3459 * Idle exchange closed via ABTS from port. No iocb
3460 * resources need to be recovered.
3461 */
3462 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
James Smartdca94792006-08-01 07:34:08 -04003463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003464 "0333 IOCB cmd 0x%x"
James Smartdca94792006-08-01 07:34:08 -04003465 " processed. Skipping"
James Smart92d7f7b2007-06-17 19:56:38 -05003466 " completion\n",
James Smartdca94792006-08-01 07:34:08 -04003467 irsp->ulpCommand);
dea31012005-04-17 16:05:31 -05003468 break;
3469 }
3470
James Smarte2a8be52019-05-06 17:26:47 -07003471 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Bottomley604a3e32005-10-29 10:28:33 -05003472 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3473 &rspiocbq);
James Smarte2a8be52019-05-06 17:26:47 -07003474 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart0f65ff62010-02-26 14:14:23 -05003475 if (unlikely(!cmdiocbq))
3476 break;
3477 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3478 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3479 if (cmdiocbq->iocb_cmpl) {
3480 spin_unlock_irqrestore(&phba->hbalock, iflag);
3481 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3482 &rspiocbq);
3483 spin_lock_irqsave(&phba->hbalock, iflag);
3484 }
dea31012005-04-17 16:05:31 -05003485 break;
James Smarta4bc3372006-12-02 13:34:16 -05003486 case LPFC_UNSOL_IOCB:
James Smart2e0fef82007-06-17 19:56:36 -05003487 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05003488 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
James Smart2e0fef82007-06-17 19:56:36 -05003489 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05003490 break;
dea31012005-04-17 16:05:31 -05003491 default:
3492 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3493 char adaptermsg[LPFC_MAX_ADPTMSG];
3494 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3495 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3496 MAX_MSG_DATA);
Joe Perches898eb712007-10-18 03:06:30 -07003497 dev_warn(&((phba->pcidev)->dev),
3498 "lpfc%d: %s\n",
dea31012005-04-17 16:05:31 -05003499 phba->brd_no, adaptermsg);
3500 } else {
3501 /* Unknown IOCB command */
3502 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003503 "0334 Unknown IOCB command "
James Smart92d7f7b2007-06-17 19:56:38 -05003504 "Data: x%x, x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003505 type, irsp->ulpCommand,
James Smart92d7f7b2007-06-17 19:56:38 -05003506 irsp->ulpStatus,
3507 irsp->ulpIoTag,
3508 irsp->ulpContext);
dea31012005-04-17 16:05:31 -05003509 }
3510 break;
3511 }
3512
3513 /*
3514 * The response IOCB has been processed. Update the ring
3515 * pointer in SLIM. If the port response put pointer has not
3516 * been updated, sync the pgp->rspPutInx and fetch the new port
3517 * response put pointer.
3518 */
James Smart7e56aa22012-08-03 12:35:34 -04003519 writel(pring->sli.sli3.rspidx,
3520 &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05003521
James Smart7e56aa22012-08-03 12:35:34 -04003522 if (pring->sli.sli3.rspidx == portRspPut)
dea31012005-04-17 16:05:31 -05003523 portRspPut = le32_to_cpu(pgp->rspPutInx);
3524 }
3525
3526 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3527 pring->stats.iocb_rsp_full++;
3528 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3529 writel(status, phba->CAregaddr);
3530 readl(phba->CAregaddr);
3531 }
3532 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3533 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3534 pring->stats.iocb_cmd_empty++;
3535
3536 /* Force update of the local copy of cmdGetInx */
James Smart7e56aa22012-08-03 12:35:34 -04003537 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05003538 lpfc_sli_resume_iocb(phba, pring);
3539
3540 if ((pring->lpfc_sli_cmd_available))
3541 (pring->lpfc_sli_cmd_available) (phba, pring);
3542
3543 }
3544
James Smart45ed1192009-10-02 15:17:02 -04003545 phba->fcp_ring_in_use = 0;
James Smart2e0fef82007-06-17 19:56:36 -05003546 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003547 return rc;
3548}
3549
James Smarte59058c2008-08-24 21:49:00 -04003550/**
James Smart3772a992009-05-22 14:50:54 -04003551 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3552 * @phba: Pointer to HBA context object.
3553 * @pring: Pointer to driver SLI ring object.
3554 * @rspiocbp: Pointer to driver response IOCB object.
3555 *
3556 * This function is called from the worker thread when there is a slow-path
3557 * response IOCB to process. This function chains all the response iocbs until
3558 * seeing the iocb with the LE bit set. The function will call
3559 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3560 * completion of a command iocb. The function will call the
3561 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3562 * The function frees the resources or calls the completion handler if this
3563 * iocb is an abort completion. The function returns NULL when the response
3564 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3565 * this function shall chain the iocb on to the iocb_continueq and return the
3566 * response iocb passed in.
3567 **/
3568static struct lpfc_iocbq *
3569lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3570 struct lpfc_iocbq *rspiocbp)
3571{
3572 struct lpfc_iocbq *saveq;
3573 struct lpfc_iocbq *cmdiocbp;
3574 struct lpfc_iocbq *next_iocb;
3575 IOCB_t *irsp = NULL;
3576 uint32_t free_saveq;
3577 uint8_t iocb_cmd_type;
3578 lpfc_iocb_type type;
3579 unsigned long iflag;
3580 int rc;
3581
3582 spin_lock_irqsave(&phba->hbalock, iflag);
3583 /* First add the response iocb to the countinueq list */
3584 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3585 pring->iocb_continueq_cnt++;
3586
Justin P. Mattock70f23fd2011-05-10 10:16:21 +02003587 /* Now, determine whether the list is completed for processing */
James Smart3772a992009-05-22 14:50:54 -04003588 irsp = &rspiocbp->iocb;
3589 if (irsp->ulpLe) {
3590 /*
3591 * By default, the driver expects to free all resources
3592 * associated with this iocb completion.
3593 */
3594 free_saveq = 1;
3595 saveq = list_get_first(&pring->iocb_continueq,
3596 struct lpfc_iocbq, list);
3597 irsp = &(saveq->iocb);
3598 list_del_init(&pring->iocb_continueq);
3599 pring->iocb_continueq_cnt = 0;
3600
3601 pring->stats.iocb_rsp++;
3602
3603 /*
3604 * If resource errors reported from HBA, reduce
3605 * queuedepths of the SCSI device.
3606 */
3607 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
James Smarte3d2b802012-08-14 14:25:43 -04003608 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3609 IOERR_NO_RESOURCES)) {
James Smart3772a992009-05-22 14:50:54 -04003610 spin_unlock_irqrestore(&phba->hbalock, iflag);
3611 phba->lpfc_rampdown_queue_depth(phba);
3612 spin_lock_irqsave(&phba->hbalock, iflag);
3613 }
3614
3615 if (irsp->ulpStatus) {
3616 /* Rsp ring <ringno> error: IOCB */
3617 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3618 "0328 Rsp Ring %d error: "
3619 "IOCB Data: "
3620 "x%x x%x x%x x%x "
3621 "x%x x%x x%x x%x "
3622 "x%x x%x x%x x%x "
3623 "x%x x%x x%x x%x\n",
3624 pring->ringno,
3625 irsp->un.ulpWord[0],
3626 irsp->un.ulpWord[1],
3627 irsp->un.ulpWord[2],
3628 irsp->un.ulpWord[3],
3629 irsp->un.ulpWord[4],
3630 irsp->un.ulpWord[5],
3631 *(((uint32_t *) irsp) + 6),
3632 *(((uint32_t *) irsp) + 7),
3633 *(((uint32_t *) irsp) + 8),
3634 *(((uint32_t *) irsp) + 9),
3635 *(((uint32_t *) irsp) + 10),
3636 *(((uint32_t *) irsp) + 11),
3637 *(((uint32_t *) irsp) + 12),
3638 *(((uint32_t *) irsp) + 13),
3639 *(((uint32_t *) irsp) + 14),
3640 *(((uint32_t *) irsp) + 15));
3641 }
3642
3643 /*
3644 * Fetch the IOCB command type and call the correct completion
3645 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3646 * get freed back to the lpfc_iocb_list by the discovery
3647 * kernel thread.
3648 */
3649 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3650 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3651 switch (type) {
3652 case LPFC_SOL_IOCB:
3653 spin_unlock_irqrestore(&phba->hbalock, iflag);
3654 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3655 spin_lock_irqsave(&phba->hbalock, iflag);
3656 break;
3657
3658 case LPFC_UNSOL_IOCB:
3659 spin_unlock_irqrestore(&phba->hbalock, iflag);
3660 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3661 spin_lock_irqsave(&phba->hbalock, iflag);
3662 if (!rc)
3663 free_saveq = 0;
3664 break;
3665
3666 case LPFC_ABORT_IOCB:
3667 cmdiocbp = NULL;
James Smarte2a8be52019-05-06 17:26:47 -07003668 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3669 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003670 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3671 saveq);
James Smarte2a8be52019-05-06 17:26:47 -07003672 spin_lock_irqsave(&phba->hbalock, iflag);
3673 }
James Smart3772a992009-05-22 14:50:54 -04003674 if (cmdiocbp) {
3675 /* Call the specified completion routine */
3676 if (cmdiocbp->iocb_cmpl) {
3677 spin_unlock_irqrestore(&phba->hbalock,
3678 iflag);
3679 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3680 saveq);
3681 spin_lock_irqsave(&phba->hbalock,
3682 iflag);
3683 } else
3684 __lpfc_sli_release_iocbq(phba,
3685 cmdiocbp);
3686 }
3687 break;
3688
3689 case LPFC_UNKNOWN_IOCB:
3690 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3691 char adaptermsg[LPFC_MAX_ADPTMSG];
3692 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3693 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3694 MAX_MSG_DATA);
3695 dev_warn(&((phba->pcidev)->dev),
3696 "lpfc%d: %s\n",
3697 phba->brd_no, adaptermsg);
3698 } else {
3699 /* Unknown IOCB command */
3700 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3701 "0335 Unknown IOCB "
3702 "command Data: x%x "
3703 "x%x x%x x%x\n",
3704 irsp->ulpCommand,
3705 irsp->ulpStatus,
3706 irsp->ulpIoTag,
3707 irsp->ulpContext);
3708 }
3709 break;
3710 }
3711
3712 if (free_saveq) {
3713 list_for_each_entry_safe(rspiocbp, next_iocb,
3714 &saveq->list, list) {
James Smart61f35bf2013-05-31 17:03:48 -04003715 list_del_init(&rspiocbp->list);
James Smart3772a992009-05-22 14:50:54 -04003716 __lpfc_sli_release_iocbq(phba, rspiocbp);
3717 }
3718 __lpfc_sli_release_iocbq(phba, saveq);
3719 }
3720 rspiocbp = NULL;
3721 }
3722 spin_unlock_irqrestore(&phba->hbalock, iflag);
3723 return rspiocbp;
3724}
3725
3726/**
3727 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
James Smarte59058c2008-08-24 21:49:00 -04003728 * @phba: Pointer to HBA context object.
3729 * @pring: Pointer to driver SLI ring object.
3730 * @mask: Host attention register mask for this ring.
3731 *
James Smart3772a992009-05-22 14:50:54 -04003732 * This routine wraps the actual slow_ring event process routine from the
3733 * API jump table function pointer from the lpfc_hba struct.
James Smarte59058c2008-08-24 21:49:00 -04003734 **/
James Smart3772a992009-05-22 14:50:54 -04003735void
James Smart2e0fef82007-06-17 19:56:36 -05003736lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3737 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05003738{
James Smart3772a992009-05-22 14:50:54 -04003739 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3740}
3741
3742/**
3743 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3744 * @phba: Pointer to HBA context object.
3745 * @pring: Pointer to driver SLI ring object.
3746 * @mask: Host attention register mask for this ring.
3747 *
3748 * This function is called from the worker thread when there is a ring event
3749 * for non-fcp rings. The caller does not hold any lock. The function will
3750 * remove each response iocb in the response ring and calls the handle
3751 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3752 **/
3753static void
3754lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3755 struct lpfc_sli_ring *pring, uint32_t mask)
3756{
James Smart34b02dc2008-08-24 21:49:55 -04003757 struct lpfc_pgp *pgp;
dea31012005-04-17 16:05:31 -05003758 IOCB_t *entry;
3759 IOCB_t *irsp = NULL;
3760 struct lpfc_iocbq *rspiocbp = NULL;
dea31012005-04-17 16:05:31 -05003761 uint32_t portRspPut, portRspMax;
dea31012005-04-17 16:05:31 -05003762 unsigned long iflag;
James Smart3772a992009-05-22 14:50:54 -04003763 uint32_t status;
dea31012005-04-17 16:05:31 -05003764
James Smart34b02dc2008-08-24 21:49:55 -04003765 pgp = &phba->port_gp[pring->ringno];
James Smart2e0fef82007-06-17 19:56:36 -05003766 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003767 pring->stats.iocb_event++;
3768
dea31012005-04-17 16:05:31 -05003769 /*
3770 * The next available response entry should never exceed the maximum
3771 * entries. If it does, treat it as an adapter hardware error.
3772 */
James Smart7e56aa22012-08-03 12:35:34 -04003773 portRspMax = pring->sli.sli3.numRiocb;
dea31012005-04-17 16:05:31 -05003774 portRspPut = le32_to_cpu(pgp->rspPutInx);
3775 if (portRspPut >= portRspMax) {
3776 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003777 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea31012005-04-17 16:05:31 -05003778 * rsp ring <portRspMax>
3779 */
James Smarted957682007-06-17 19:56:37 -05003780 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003781 "0303 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003782 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04003783 pring->ringno, portRspPut, portRspMax);
dea31012005-04-17 16:05:31 -05003784
James Smart2e0fef82007-06-17 19:56:36 -05003785 phba->link_state = LPFC_HBA_ERROR;
3786 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003787
3788 phba->work_hs = HS_FFER3;
3789 lpfc_handle_eratt(phba);
3790
James Smart3772a992009-05-22 14:50:54 -04003791 return;
dea31012005-04-17 16:05:31 -05003792 }
3793
3794 rmb();
James Smart7e56aa22012-08-03 12:35:34 -04003795 while (pring->sli.sli3.rspidx != portRspPut) {
dea31012005-04-17 16:05:31 -05003796 /*
3797 * Build a completion list and call the appropriate handler.
3798 * The process is to get the next available response iocb, get
3799 * a free iocb from the list, copy the response data into the
3800 * free iocb, insert to the continuation list, and update the
3801 * next response index to slim. This process makes response
3802 * iocb's in the ring available to DMA as fast as possible but
3803 * pays a penalty for a copy operation. Since the iocb is
3804 * only 32 bytes, this penalty is considered small relative to
3805 * the PCI reads for register values and a slim write. When
3806 * the ulpLe field is set, the entire Command has been
3807 * received.
3808 */
James Smarted957682007-06-17 19:56:37 -05003809 entry = lpfc_resp_iocb(phba, pring);
3810
James Smart858c9f62007-06-17 19:56:39 -05003811 phba->last_completion_time = jiffies;
James Smart2e0fef82007-06-17 19:56:36 -05003812 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05003813 if (rspiocbp == NULL) {
3814 printk(KERN_ERR "%s: out of buffers! Failing "
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07003815 "completion.\n", __func__);
dea31012005-04-17 16:05:31 -05003816 break;
3817 }
3818
James Smarted957682007-06-17 19:56:37 -05003819 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3820 phba->iocb_rsp_size);
dea31012005-04-17 16:05:31 -05003821 irsp = &rspiocbp->iocb;
3822
James Smart7e56aa22012-08-03 12:35:34 -04003823 if (++pring->sli.sli3.rspidx >= portRspMax)
3824 pring->sli.sli3.rspidx = 0;
dea31012005-04-17 16:05:31 -05003825
James Smarta58cbd52007-08-02 11:09:43 -04003826 if (pring->ringno == LPFC_ELS_RING) {
3827 lpfc_debugfs_slow_ring_trc(phba,
3828 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3829 *(((uint32_t *) irsp) + 4),
3830 *(((uint32_t *) irsp) + 6),
3831 *(((uint32_t *) irsp) + 7));
3832 }
3833
James Smart7e56aa22012-08-03 12:35:34 -04003834 writel(pring->sli.sli3.rspidx,
3835 &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05003836
James Smart3772a992009-05-22 14:50:54 -04003837 spin_unlock_irqrestore(&phba->hbalock, iflag);
3838 /* Handle the response IOCB */
3839 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3840 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003841
3842 /*
3843 * If the port response put pointer has not been updated, sync
3844 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
3845 * response put pointer.
3846 */
James Smart7e56aa22012-08-03 12:35:34 -04003847 if (pring->sli.sli3.rspidx == portRspPut) {
dea31012005-04-17 16:05:31 -05003848 portRspPut = le32_to_cpu(pgp->rspPutInx);
3849 }
James Smart7e56aa22012-08-03 12:35:34 -04003850 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea31012005-04-17 16:05:31 -05003851
James Smart92d7f7b2007-06-17 19:56:38 -05003852 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea31012005-04-17 16:05:31 -05003853 /* At least one response entry has been freed */
3854 pring->stats.iocb_rsp_full++;
3855 /* SET RxRE_RSP in Chip Att register */
3856 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3857 writel(status, phba->CAregaddr);
3858 readl(phba->CAregaddr); /* flush */
3859 }
3860 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3861 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3862 pring->stats.iocb_cmd_empty++;
3863
3864 /* Force update of the local copy of cmdGetInx */
James Smart7e56aa22012-08-03 12:35:34 -04003865 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05003866 lpfc_sli_resume_iocb(phba, pring);
3867
3868 if ((pring->lpfc_sli_cmd_available))
3869 (pring->lpfc_sli_cmd_available) (phba, pring);
3870
3871 }
3872
James Smart2e0fef82007-06-17 19:56:36 -05003873 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003874 return;
dea31012005-04-17 16:05:31 -05003875}
3876
James Smarte59058c2008-08-24 21:49:00 -04003877/**
James Smart4f774512009-05-22 14:52:35 -04003878 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3879 * @phba: Pointer to HBA context object.
3880 * @pring: Pointer to driver SLI ring object.
3881 * @mask: Host attention register mask for this ring.
3882 *
3883 * This function is called from the worker thread when there is a pending
3884 * ELS response iocb on the driver internal slow-path response iocb worker
3885 * queue. The caller does not hold any lock. The function will remove each
3886 * response iocb from the response worker queue and calls the handle
3887 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3888 **/
3889static void
3890lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3891 struct lpfc_sli_ring *pring, uint32_t mask)
3892{
3893 struct lpfc_iocbq *irspiocbq;
James Smart4d9ab992009-10-02 15:16:39 -04003894 struct hbq_dmabuf *dmabuf;
3895 struct lpfc_cq_event *cq_event;
James Smart4f774512009-05-22 14:52:35 -04003896 unsigned long iflag;
James Smart0ef01a22018-09-10 10:30:45 -07003897 int count = 0;
James Smart4f774512009-05-22 14:52:35 -04003898
James Smart45ed1192009-10-02 15:17:02 -04003899 spin_lock_irqsave(&phba->hbalock, iflag);
3900 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3901 spin_unlock_irqrestore(&phba->hbalock, iflag);
3902 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
James Smart4f774512009-05-22 14:52:35 -04003903 /* Get the response iocb from the head of work queue */
3904 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart45ed1192009-10-02 15:17:02 -04003905 list_remove_head(&phba->sli4_hba.sp_queue_event,
James Smart4d9ab992009-10-02 15:16:39 -04003906 cq_event, struct lpfc_cq_event, list);
James Smart4f774512009-05-22 14:52:35 -04003907 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart4d9ab992009-10-02 15:16:39 -04003908
3909 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3910 case CQE_CODE_COMPL_WQE:
3911 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3912 cq_event);
James Smart45ed1192009-10-02 15:17:02 -04003913 /* Translate ELS WCQE to response IOCBQ */
3914 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3915 irspiocbq);
3916 if (irspiocbq)
3917 lpfc_sli_sp_handle_rspiocb(phba, pring,
3918 irspiocbq);
James Smart0ef01a22018-09-10 10:30:45 -07003919 count++;
James Smart4d9ab992009-10-02 15:16:39 -04003920 break;
3921 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -04003922 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -04003923 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3924 cq_event);
3925 lpfc_sli4_handle_received_buffer(phba, dmabuf);
James Smart0ef01a22018-09-10 10:30:45 -07003926 count++;
James Smart4d9ab992009-10-02 15:16:39 -04003927 break;
3928 default:
3929 break;
3930 }
James Smart0ef01a22018-09-10 10:30:45 -07003931
3932 /* Limit the number of events to 64 to avoid soft lockups */
3933 if (count == 64)
3934 break;
James Smart4f774512009-05-22 14:52:35 -04003935 }
3936}
3937
3938/**
James Smart3621a712009-04-06 18:47:14 -04003939 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
James Smarte59058c2008-08-24 21:49:00 -04003940 * @phba: Pointer to HBA context object.
3941 * @pring: Pointer to driver SLI ring object.
3942 *
3943 * This function aborts all iocbs in the given ring and frees all the iocb
3944 * objects in txq. This function issues an abort iocb for all the iocb commands
3945 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3946 * the return of this function. The caller is not required to hold any locks.
3947 **/
James Smart2e0fef82007-06-17 19:56:36 -05003948void
dea31012005-04-17 16:05:31 -05003949lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3950{
James Smart2534ba72007-04-25 09:52:20 -04003951 LIST_HEAD(completions);
dea31012005-04-17 16:05:31 -05003952 struct lpfc_iocbq *iocb, *next_iocb;
dea31012005-04-17 16:05:31 -05003953
James Smart92d7f7b2007-06-17 19:56:38 -05003954 if (pring->ringno == LPFC_ELS_RING) {
3955 lpfc_fabric_abort_hba(phba);
3956 }
3957
dea31012005-04-17 16:05:31 -05003958 /* Error everything on txq and txcmplq
3959 * First do the txq.
3960 */
James Smartdb55fba2014-04-04 13:52:02 -04003961 if (phba->sli_rev >= LPFC_SLI_REV4) {
3962 spin_lock_irq(&pring->ring_lock);
3963 list_splice_init(&pring->txq, &completions);
3964 pring->txq_cnt = 0;
3965 spin_unlock_irq(&pring->ring_lock);
dea31012005-04-17 16:05:31 -05003966
James Smartdb55fba2014-04-04 13:52:02 -04003967 spin_lock_irq(&phba->hbalock);
3968 /* Next issue ABTS for everything on the txcmplq */
3969 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3970 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3971 spin_unlock_irq(&phba->hbalock);
3972 } else {
3973 spin_lock_irq(&phba->hbalock);
3974 list_splice_init(&pring->txq, &completions);
3975 pring->txq_cnt = 0;
James Smart2534ba72007-04-25 09:52:20 -04003976
James Smartdb55fba2014-04-04 13:52:02 -04003977 /* Next issue ABTS for everything on the txcmplq */
3978 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3979 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3980 spin_unlock_irq(&phba->hbalock);
3981 }
James Smart2534ba72007-04-25 09:52:20 -04003982
James Smarta257bf92009-04-06 18:48:10 -04003983 /* Cancel all the IOCBs from the completions list */
3984 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3985 IOERR_SLI_ABORTED);
dea31012005-04-17 16:05:31 -05003986}
3987
James Smarte59058c2008-08-24 21:49:00 -04003988/**
James Smartdb55fba2014-04-04 13:52:02 -04003989 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3990 * @phba: Pointer to HBA context object.
3991 * @pring: Pointer to driver SLI ring object.
3992 *
3993 * This function aborts all iocbs in FCP rings and frees all the iocb
3994 * objects in txq. This function issues an abort iocb for all the iocb commands
3995 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3996 * the return of this function. The caller is not required to hold any locks.
3997 **/
3998void
3999lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4000{
4001 struct lpfc_sli *psli = &phba->sli;
4002 struct lpfc_sli_ring *pring;
4003 uint32_t i;
4004
4005 /* Look on all the FCP Rings for the iotag */
4006 if (phba->sli_rev >= LPFC_SLI_REV4) {
James Smartcdb42be2019-01-28 11:14:21 -08004007 for (i = 0; i < phba->cfg_hdw_queue; i++) {
James Smartc00f62e2019-08-14 16:57:11 -07004008 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
James Smartdb55fba2014-04-04 13:52:02 -04004009 lpfc_sli_abort_iocb_ring(phba, pring);
4010 }
4011 } else {
James Smart895427b2017-02-12 13:52:30 -08004012 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smartdb55fba2014-04-04 13:52:02 -04004013 lpfc_sli_abort_iocb_ring(phba, pring);
4014 }
4015}
4016
/**
 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the IO ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq, they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;
	struct lpfc_iocbq *piocb, *next_iocb;

	/* Bail out if a flush is already in progress, or if the SLI4
	 * hardware queues were never allocated.
	 * NOTE(review): on SLI3 adapters sli4_hba.hdwq is presumably NULL,
	 * which would make this return before reaching the SLI3 branch
	 * below -- confirm that is intended.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_IOQ_FLUSH ||
	    !phba->sli4_hba.hdwq) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Indicate the I/O queues are flushed */
	phba->hba_flag |= HBA_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		/* SLI4: one IO ring per hardware queue, each with its own
		 * ring_lock.
		 */
		for (i = 0; i < phba->cfg_hdw_queue; i++) {
			pring = phba->sli4_hba.hdwq[i].io_wq->pring;

			spin_lock_irq(&pring->ring_lock);
			/* Retrieve everything on txq */
			list_splice_init(&pring->txq, &txq);
			/* Clear ON_TXCMPLQ so later completion handling
			 * does not try to unlink these iocbs again.
			 */
			list_for_each_entry_safe(piocb, next_iocb,
						 &pring->txcmplq, list)
				piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			/* Retrieve everything on the txcmplq */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Flush the txq; entries are completed (not
			 * aborted) with IOERR_SLI_DOWN.
			 */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			/* Flush the txcmpq */
			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
		}
	} else {
		/* SLI3: single FCP ring protected by the global hbalock */
		pring = &psli->sli3_ring[LPFC_FCP_RING];

		spin_lock_irq(&phba->hbalock);
		/* Retrieve everything on txq */
		list_splice_init(&pring->txq, &txq);
		/* Clear ON_TXCMPLQ on each in-flight iocb (see above) */
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Flush the txq */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
		/* Flush the txcmpq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
4096
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when HBA fail to restart otherwise returns
 * zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register; a failed readl means the
	 * device is gone, so report not-ready immediately.
	 */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 * NOTE(review): the first tier actually sleeps 10ms (msleep(10))
	 * and later tiers 500ms/2500ms -- the comment's timings do not
	 * match the code; confirm which is intended.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Still not ready at attempt 15: do post (restart
			 * the board once) and keep polling.
			 */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
4164
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked. Unused here; present to match the
 *        lpfc_sli_brdready jump-table signature shared with SLI3.
 *
 * This function checks the host status register to check if HBA is
 * ready. This function will wait in a loop for the HBA to be ready
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when HBA fail to be ready
 * otherwise returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		/* Not ready: restart the port once and re-check */
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		/* Ready: interrupts are not yet enabled on this port */
		phba->sli4_hba.intr_enable = 0;

	return retval;
}
4200
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 * Returns whatever the SLI-rev-specific routine returns (0 = ready,
 * 1 = failed to become ready).
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	/* Dispatch to lpfc_sli_brdready_s3 or _s4 via the jump table */
	return phba->lpfc_sli_brdready(phba, mask);
}
4214
/* Pattern written to SLIM so the chip's acknowledgement (its bitwise
 * complement) can be recognized during the reset handshake below.
 */
#define BARRIER_TEST_PATTERN	(0xdeadbeef)

/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This function is called
 * with hbalock held and requests HBA to quiesce DMAs before a reset.
 * Only applies to specific adapters (Helios/Thor JEDEC IDs); it is a
 * silent no-op on everything else and on any register read failure.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int i;
	uint8_t hdrtype;

	lockdep_assert_held(&phba->hbalock);

	/* Barrier only needed on single-function Helios/Thor adapters */
	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention so the KILL_BOARD handshake does
	 * not trigger the ERATT interrupt path.
	 */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	/* Build a KILL_BOARD mailbox command in a local word and post it
	 * directly to SLIM, with the test pattern in the next word.
	 */
	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Poll up to ~50ms for the chip to complement the test pattern */
	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	resp_data = 0;
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data != ~(BARRIER_TEST_PATTERN)) {
		/* Chip never acknowledged; clean up as far as possible */
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Hand the mailbox back to the host and wait for the chip to
	 * reflect it (up to ~500ms).
	 */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:

	/* Wait for the error attention raised by KILL_BOARD */
	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	/* Re-enable error attention and restore the saved HC register */
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
4321
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command and waits for
 * the error attention interrupt. This function is called for stopping
 * the firmware processing. The caller is not required to hold any
 * locks. This function calls lpfc_hba_down_post function to free
 * any pending commands after the kill. The function will return 1 when it
 * fails to kill the board else will return 0.
 **/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention; the kill itself will raise ERATT
	 * and we want to handle it by polling, not via the interrupt path.
	 */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		/* On MBX_BUSY the mailbox stays queued, so only free it
		 * for other failures.
		 */
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	/* 0 only if the board actually signalled ERATT (kill succeeded) */
	return ha_copy & HA_ERATT ? 0 : 1;
}
4418
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the control
 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * This function returns 0 on success, or -EIO if the PCI command
 * register cannot be read.
 * The caller is not required to hold any locks.
 **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	/* pport may not exist yet during early bring-up */
	if (phba->pport) {
		phba->pport->fc_myDID = 0;
		phba->pport->fc_prevDID = 0;
	}

	/* Turn off parity checking and serr during the physical reset */
	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
		return -EIO;

	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register (re-enables parity/SERR as saved) */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info: rewind every ring's indices */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
4488
James Smarte59058c2008-08-24 21:49:00 -04004489/**
James Smartda0436e2009-05-22 14:51:39 -04004490 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4491 * @phba: Pointer to HBA context object.
4492 *
4493 * This function resets a SLI4 HBA. This function disables PCI layer parity
4494 * checking during resets the device. The caller is not required to hold
4495 * any locks.
4496 *
James Smart8c24a4f2019-08-14 16:56:53 -07004497 * This function returns 0 on success else returns negative error code.
James Smartda0436e2009-05-22 14:51:39 -04004498 **/
4499int
4500lpfc_sli4_brdreset(struct lpfc_hba *phba)
4501{
4502 struct lpfc_sli *psli = &phba->sli;
4503 uint16_t cfg_value;
James Smart02936352014-04-04 13:52:12 -04004504 int rc = 0;
James Smartda0436e2009-05-22 14:51:39 -04004505
4506 /* Reset HBA */
4507 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart02936352014-04-04 13:52:12 -04004508 "0295 Reset HBA Data: x%x x%x x%x\n",
4509 phba->pport->port_state, psli->sli_flag,
4510 phba->hba_flag);
James Smartda0436e2009-05-22 14:51:39 -04004511
4512 /* perform board reset */
4513 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04004514 phba->link_events = 0;
James Smartda0436e2009-05-22 14:51:39 -04004515 phba->pport->fc_myDID = 0;
4516 phba->pport->fc_prevDID = 0;
4517
James Smartda0436e2009-05-22 14:51:39 -04004518 spin_lock_irq(&phba->hbalock);
4519 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4520 phba->fcf.fcf_flag = 0;
James Smartda0436e2009-05-22 14:51:39 -04004521 spin_unlock_irq(&phba->hbalock);
4522
James Smart02936352014-04-04 13:52:12 -04004523 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4524 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4525 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4526 return rc;
4527 }
4528
James Smartda0436e2009-05-22 14:51:39 -04004529 /* Now physically reset the device */
4530 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4531 "0389 Performing PCI function reset!\n");
James Smartbe858b62010-12-15 17:57:20 -05004532
4533 /* Turn off parity checking and serr during the physical reset */
James Smart32a93102019-03-12 16:30:13 -07004534 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4536 "3205 PCI read Config failed\n");
4537 return -EIO;
4538 }
4539
James Smartbe858b62010-12-15 17:57:20 -05004540 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4541 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4542
James Smart88318812012-09-29 11:29:29 -04004543 /* Perform FCoE PCI function reset before freeing queue memory */
James Smart27b01b82012-05-09 21:19:44 -04004544 rc = lpfc_pci_function_reset(phba);
James Smartda0436e2009-05-22 14:51:39 -04004545
James Smartbe858b62010-12-15 17:57:20 -05004546 /* Restore PCI cmd register */
4547 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4548
James Smart27b01b82012-05-09 21:19:44 -04004549 return rc;
James Smartda0436e2009-05-22 14:51:39 -04004550}
4551
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA. The caller is not required to hold any lock.
 * This function writes MBX_RESTART mailbox command to the SLIM and
 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
 * function to free any pending commands. The function enables
 * POST only during the first initialization. The function returns zero.
 * The function does not guarantee completion of MBX_RESTART mailbox
 * command before the return of this function.
 **/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state before
	 * hba_flag is wiped below.
	 */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* Build an MBX_RESTART mailbox in a local word for direct SLIM
	 * write (no mailbox queue involvement).
	 */
	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	/* Quiesce chip DMA before the reset (requires hbalock held) */
	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->pport && phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	if (phba->pport)
		phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Restart link statistics from this point */
	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
4628
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands. Returns 0 on success, otherwise the
 * negative error code from lpfc_sli4_brdreset.
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state before
	 * hba_flag is wiped below.
	 */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);
	if (rc) {
		/* Reset failed: mark the HBA errored but still run the
		 * down-post/queue-destroy cleanup below.
		 */
		phba->link_state = LPFC_HBA_ERROR;
		goto hba_down_queue;
	}

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Restart link statistics from this point */
	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

hba_down_queue:
	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}
4678
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 * Returns whatever the SLI-rev-specific restart routine returns.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	/* Dispatch to lpfc_sli_brdrestart_s3 or _s4 via the jump table */
	return phba->lpfc_sli_brdrestart(phba);
}
4691
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iteration, the function will restart the HBA again. The function returns
 * zero if HBA successfully restarted else returns negative error code
 * (-EIO on register read failure or chipset error, -ETIMEDOUT on
 * timeout).
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retires for a total of
		 * ~60 seconds before reset the board again and check every
		 * 1 sec for 50 retries. The up to 60 seconds before the
		 * board ready is required by the Falcon FIPS zeroization
		 * complete, and any reset the board in between shall cause
		 * restart of zeroization, further delay the board ready.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		/* Tiered back-off: 10ms, then 100ms, then 1s per retry */
		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post: one board restart partway through the
			 * wait, then continue polling.
			 */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
4791
James Smarte59058c2008-08-24 21:49:00 -04004792/**
James Smart3621a712009-04-06 18:47:14 -04004793 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
James Smarte59058c2008-08-24 21:49:00 -04004794 *
4795 * This function calculates and returns the number of HBQs required to be
4796 * configured.
4797 **/
James Smart78b2d852007-08-02 11:10:21 -04004798int
James Smarted957682007-06-17 19:56:37 -05004799lpfc_sli_hbq_count(void)
4800{
James Smart92d7f7b2007-06-17 19:56:38 -05004801 return ARRAY_SIZE(lpfc_hbq_defs);
James Smarted957682007-06-17 19:56:37 -05004802}
4803
James Smarte59058c2008-08-24 21:49:00 -04004804/**
James Smart3621a712009-04-06 18:47:14 -04004805 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004806 *
4807 * This function adds the number of hbq entries in every HBQ to get
4808 * the total number of hbq entries required for the HBA and returns
4809 * the total count.
4810 **/
James Smarted957682007-06-17 19:56:37 -05004811static int
4812lpfc_sli_hbq_entry_count(void)
4813{
4814 int hbq_count = lpfc_sli_hbq_count();
4815 int count = 0;
4816 int i;
4817
4818 for (i = 0; i < hbq_count; ++i)
James Smart92d7f7b2007-06-17 19:56:38 -05004819 count += lpfc_hbq_defs[i]->entry_count;
James Smarted957682007-06-17 19:56:37 -05004820 return count;
4821}
4822
James Smarte59058c2008-08-24 21:49:00 -04004823/**
James Smart3621a712009-04-06 18:47:14 -04004824 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004825 *
4826 * This function calculates amount of memory required for all hbq entries
4827 * to be configured and returns the total memory required.
4828 **/
dea31012005-04-17 16:05:31 -05004829int
James Smarted957682007-06-17 19:56:37 -05004830lpfc_sli_hbq_size(void)
4831{
4832 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4833}
4834
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. For each HBQ it resets the
 * put/get indices, issues a CONFIG_HBQ mailbox command (polled), and
 * finally populates the queues with buffers. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	/* hbq_entry_index tracks where each HBQ's entries begin within the
	 * single contiguous region sized by lpfc_sli_hbq_size().
	 */
	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		/* Polled mailbox: any status other than MBX_SUCCESS is fatal
		 * for HBA bring-up.
		 */
		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
4903
James Smarte59058c2008-08-24 21:49:00 -04004904/**
James Smart4f774512009-05-22 14:52:35 -04004905 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4906 * @phba: Pointer to HBA context object.
4907 *
4908 * This function is called during the SLI initialization to configure
4909 * all the HBQs and post buffers to the HBQ. The caller is not
4910 * required to hold any locks. This function will return zero if successful
4911 * else it will return negative error code.
4912 **/
4913static int
4914lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4915{
4916 phba->hbq_in_use = 1;
James Smart999fbbc2019-12-18 15:58:06 -08004917 /**
4918 * Specific case when the MDS diagnostics is enabled and supported.
4919 * The receive buffer count is truncated to manage the incoming
4920 * traffic.
4921 **/
4922 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
4923 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4924 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
4925 else
4926 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4927 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
James Smart4f774512009-05-22 14:52:35 -04004928 phba->hbq_count = 1;
James Smart895427b2017-02-12 13:52:30 -08004929 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
James Smart4f774512009-05-22 14:52:35 -04004930 /* Initially populate or replenish the HBQs */
James Smart4f774512009-05-22 14:52:35 -04004931 return 0;
4932}
4933
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. Up to two restart/config attempts are made; on success the
 * SLI3 feature options granted by the port (NPIV, HBQ, CRP, BlockGuard)
 * are recorded in phba->sli3_options. The caller is not required to
 * hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	/* At most two restart-and-configure attempts */
	while (resetcount < 2 && !done) {
		/* Block other mailbox traffic while the port restarts */
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		/* Clear all option bits first; re-derive them below from
		 * what the port actually granted in the CONFIG_PORT reply.
		 */
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		/* cMA: port confirmed SLI-3 mode is active */
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		/* gmv: port granted multi-vport (NPIV) support */
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;

		} else
			phba->max_vpi = 0;
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		/* gbg==0: port refused BlockGuard (T10 DIF) - disable it */
		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		/* SLI-2 fallback: no HBQs, no vports */
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
5059
James Smarte59058c2008-08-24 21:49:00 -04005060
5061/**
Masahiro Yamada183b8022017-02-27 14:29:20 -08005062 * lpfc_sli_hba_setup - SLI initialization function
James Smarte59058c2008-08-24 21:49:00 -04005063 * @phba: Pointer to HBA context object.
5064 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08005065 * This function is the main SLI initialization function. This function
5066 * is called by the HBA initialization code, HBA reset code and HBA
James Smarte59058c2008-08-24 21:49:00 -04005067 * error attention handler code. Caller is not required to hold any
5068 * locks. This function issues config_port mailbox command to configure
5069 * the SLI, setup iocb rings and HBQ rings. In the end the function
5070 * calls the config_port_post function to issue init_link mailbox
5071 * command and to start the discovery. The function will return zero
5072 * if successful, else it will return negative error code.
5073 **/
James Smarted957682007-06-17 19:56:37 -05005074int
5075lpfc_sli_hba_setup(struct lpfc_hba *phba)
5076{
5077 uint32_t rc;
James Smart6d368e52011-05-24 11:44:12 -04005078 int mode = 3, i;
5079 int longs;
James Smarted957682007-06-17 19:56:37 -05005080
James Smart12247e82016-07-06 12:36:09 -07005081 switch (phba->cfg_sli_mode) {
James Smarted957682007-06-17 19:56:37 -05005082 case 2:
James Smart78b2d852007-08-02 11:10:21 -04005083 if (phba->cfg_enable_npiv) {
James Smart92d7f7b2007-06-17 19:56:38 -05005084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smart12247e82016-07-06 12:36:09 -07005085 "1824 NPIV enabled: Override sli_mode "
James Smart92d7f7b2007-06-17 19:56:38 -05005086 "parameter (%d) to auto (0).\n",
James Smart12247e82016-07-06 12:36:09 -07005087 phba->cfg_sli_mode);
James Smart92d7f7b2007-06-17 19:56:38 -05005088 break;
5089 }
James Smarted957682007-06-17 19:56:37 -05005090 mode = 2;
5091 break;
5092 case 0:
5093 case 3:
5094 break;
5095 default:
James Smart92d7f7b2007-06-17 19:56:38 -05005096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smart12247e82016-07-06 12:36:09 -07005097 "1819 Unrecognized sli_mode parameter: %d.\n",
5098 phba->cfg_sli_mode);
James Smarted957682007-06-17 19:56:37 -05005099
5100 break;
5101 }
James Smartb5c53952016-03-31 14:12:30 -07005102 phba->fcp_embed_io = 0; /* SLI4 FC support only */
James Smarted957682007-06-17 19:56:37 -05005103
James Smart93996272008-08-24 21:50:30 -04005104 rc = lpfc_sli_config_port(phba, mode);
5105
James Smart12247e82016-07-06 12:36:09 -07005106 if (rc && phba->cfg_sli_mode == 3)
James Smart92d7f7b2007-06-17 19:56:38 -05005107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04005108 "1820 Unable to select SLI-3. "
5109 "Not supported by adapter.\n");
James Smarted957682007-06-17 19:56:37 -05005110 if (rc && mode != 2)
James Smart93996272008-08-24 21:50:30 -04005111 rc = lpfc_sli_config_port(phba, 2);
James Smart4597663f2016-07-06 12:36:01 -07005112 else if (rc && mode == 2)
5113 rc = lpfc_sli_config_port(phba, 3);
James Smarted957682007-06-17 19:56:37 -05005114 if (rc)
dea31012005-04-17 16:05:31 -05005115 goto lpfc_sli_hba_setup_error;
5116
James Smart0d878412009-10-02 15:16:56 -04005117 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5118 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5119 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5120 if (!rc) {
5121 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5122 "2709 This device supports "
5123 "Advanced Error Reporting (AER)\n");
5124 spin_lock_irq(&phba->hbalock);
5125 phba->hba_flag |= HBA_AER_ENABLED;
5126 spin_unlock_irq(&phba->hbalock);
5127 } else {
5128 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5129 "2708 This device does not support "
James Smartb069d7e2013-05-31 17:04:36 -04005130 "Advanced Error Reporting (AER): %d\n",
5131 rc);
James Smart0d878412009-10-02 15:16:56 -04005132 phba->cfg_aer_support = 0;
5133 }
5134 }
5135
James Smarted957682007-06-17 19:56:37 -05005136 if (phba->sli_rev == 3) {
5137 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5138 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
James Smarted957682007-06-17 19:56:37 -05005139 } else {
5140 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5141 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
James Smart92d7f7b2007-06-17 19:56:38 -05005142 phba->sli3_options = 0;
James Smarted957682007-06-17 19:56:37 -05005143 }
5144
5145 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04005146 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5147 phba->sli_rev, phba->max_vpi);
James Smarted957682007-06-17 19:56:37 -05005148 rc = lpfc_sli_ring_map(phba);
dea31012005-04-17 16:05:31 -05005149
5150 if (rc)
5151 goto lpfc_sli_hba_setup_error;
5152
James Smart6d368e52011-05-24 11:44:12 -04005153 /* Initialize VPIs. */
5154 if (phba->sli_rev == LPFC_SLI_REV3) {
5155 /*
5156 * The VPI bitmask and physical ID array are allocated
5157 * and initialized once only - at driver load. A port
5158 * reset doesn't need to reinitialize this memory.
5159 */
5160 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5161 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
Kees Cook6396bb22018-06-12 14:03:40 -07005162 phba->vpi_bmask = kcalloc(longs,
5163 sizeof(unsigned long),
James Smart6d368e52011-05-24 11:44:12 -04005164 GFP_KERNEL);
5165 if (!phba->vpi_bmask) {
5166 rc = -ENOMEM;
5167 goto lpfc_sli_hba_setup_error;
5168 }
5169
Kees Cook6396bb22018-06-12 14:03:40 -07005170 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5171 sizeof(uint16_t),
5172 GFP_KERNEL);
James Smart6d368e52011-05-24 11:44:12 -04005173 if (!phba->vpi_ids) {
5174 kfree(phba->vpi_bmask);
5175 rc = -ENOMEM;
5176 goto lpfc_sli_hba_setup_error;
5177 }
5178 for (i = 0; i < phba->max_vpi; i++)
5179 phba->vpi_ids[i] = i;
5180 }
5181 }
5182
James Smart93996272008-08-24 21:50:30 -04005183 /* Init HBQs */
James Smarted957682007-06-17 19:56:37 -05005184 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5185 rc = lpfc_sli_hbq_setup(phba);
5186 if (rc)
5187 goto lpfc_sli_hba_setup_error;
5188 }
James Smart04c68492009-05-22 14:52:52 -04005189 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05005190 phba->sli.sli_flag |= LPFC_PROCESS_LA;
James Smart04c68492009-05-22 14:52:52 -04005191 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05005192
5193 rc = lpfc_config_port_post(phba);
5194 if (rc)
5195 goto lpfc_sli_hba_setup_error;
5196
James Smarted957682007-06-17 19:56:37 -05005197 return rc;
5198
James Smart92d7f7b2007-06-17 19:56:38 -05005199lpfc_sli_hba_setup_error:
James Smart2e0fef82007-06-17 19:56:36 -05005200 phba->link_state = LPFC_HBA_ERROR;
James Smarte40a02c2010-02-26 14:13:54 -05005201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04005202 "0445 Firmware initialization failed\n");
dea31012005-04-17 16:05:31 -05005203 return rc;
5204}
5205
James Smartda0436e2009-05-22 14:51:39 -04005206/**
5207 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5208 * @phba: Pointer to HBA context object.
5209 * @mboxq: mailbox pointer.
5210 * This function issue a dump mailbox command to read config region
5211 * 23 and parse the records in the region and populate driver
5212 * data structure.
5213 **/
5214static int
James Smartff78d8f2011-12-13 13:21:35 -05005215lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
James Smartda0436e2009-05-22 14:51:39 -04005216{
James Smartff78d8f2011-12-13 13:21:35 -05005217 LPFC_MBOXQ_t *mboxq;
James Smartda0436e2009-05-22 14:51:39 -04005218 struct lpfc_dmabuf *mp;
5219 struct lpfc_mqe *mqe;
5220 uint32_t data_length;
5221 int rc;
5222
5223 /* Program the default value of vlan_id and fc_map */
5224 phba->valid_vlan = 0;
5225 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5226 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5227 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5228
James Smartff78d8f2011-12-13 13:21:35 -05005229 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5230 if (!mboxq)
James Smartda0436e2009-05-22 14:51:39 -04005231 return -ENOMEM;
5232
James Smartff78d8f2011-12-13 13:21:35 -05005233 mqe = &mboxq->u.mqe;
5234 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5235 rc = -ENOMEM;
5236 goto out_free_mboxq;
5237 }
5238
James Smart3e1f0712018-11-29 16:09:29 -08005239 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
James Smartda0436e2009-05-22 14:51:39 -04005240 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5241
5242 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5243 "(%d):2571 Mailbox cmd x%x Status x%x "
5244 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5245 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5246 "CQ: x%x x%x x%x x%x\n",
5247 mboxq->vport ? mboxq->vport->vpi : 0,
5248 bf_get(lpfc_mqe_command, mqe),
5249 bf_get(lpfc_mqe_status, mqe),
5250 mqe->un.mb_words[0], mqe->un.mb_words[1],
5251 mqe->un.mb_words[2], mqe->un.mb_words[3],
5252 mqe->un.mb_words[4], mqe->un.mb_words[5],
5253 mqe->un.mb_words[6], mqe->un.mb_words[7],
5254 mqe->un.mb_words[8], mqe->un.mb_words[9],
5255 mqe->un.mb_words[10], mqe->un.mb_words[11],
5256 mqe->un.mb_words[12], mqe->un.mb_words[13],
5257 mqe->un.mb_words[14], mqe->un.mb_words[15],
5258 mqe->un.mb_words[16], mqe->un.mb_words[50],
5259 mboxq->mcqe.word0,
5260 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5261 mboxq->mcqe.trailer);
5262
5263 if (rc) {
5264 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5265 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005266 rc = -EIO;
5267 goto out_free_mboxq;
James Smartda0436e2009-05-22 14:51:39 -04005268 }
5269 data_length = mqe->un.mb_words[5];
James Smarta0c87cb2009-07-19 10:01:10 -04005270 if (data_length > DMP_RGN23_SIZE) {
James Smartd11e31d2009-06-10 17:23:06 -04005271 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5272 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005273 rc = -EIO;
5274 goto out_free_mboxq;
James Smartd11e31d2009-06-10 17:23:06 -04005275 }
James Smartda0436e2009-05-22 14:51:39 -04005276
5277 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5278 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5279 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005280 rc = 0;
5281
5282out_free_mboxq:
5283 mempool_free(mboxq, phba->mbox_mem_pool);
5284 return rc;
James Smartda0436e2009-05-22 14:51:39 -04005285}
5286
5287/**
5288 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5289 * @phba: pointer to lpfc hba data structure.
5290 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5291 * @vpd: pointer to the memory to hold resulting port vpd data.
5292 * @vpd_size: On input, the number of bytes allocated to @vpd.
5293 * On output, the number of data bytes in @vpd.
5294 *
5295 * This routine executes a READ_REV SLI4 mailbox command. In
5296 * addition, this routine gets the port vpd data.
5297 *
5298 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02005299 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -04005300 * -ENOMEM - could not allocated memory.
James Smartda0436e2009-05-22 14:51:39 -04005301 **/
5302static int
5303lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5304 uint8_t *vpd, uint32_t *vpd_size)
5305{
5306 int rc = 0;
5307 uint32_t dma_size;
5308 struct lpfc_dmabuf *dmabuf;
5309 struct lpfc_mqe *mqe;
5310
5311 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5312 if (!dmabuf)
5313 return -ENOMEM;
5314
5315 /*
5316 * Get a DMA buffer for the vpd data resulting from the READ_REV
5317 * mailbox command.
5318 */
5319 dma_size = *vpd_size;
Luis Chamberlain750afb02019-01-04 09:23:09 +01005320 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5321 &dmabuf->phys, GFP_KERNEL);
James Smartda0436e2009-05-22 14:51:39 -04005322 if (!dmabuf->virt) {
5323 kfree(dmabuf);
5324 return -ENOMEM;
5325 }
James Smartda0436e2009-05-22 14:51:39 -04005326
5327 /*
5328 * The SLI4 implementation of READ_REV conflicts at word1,
5329 * bits 31:16 and SLI4 adds vpd functionality not present
5330 * in SLI3. This code corrects the conflicts.
5331 */
5332 lpfc_read_rev(phba, mboxq);
5333 mqe = &mboxq->u.mqe;
5334 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5335 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5336 mqe->un.read_rev.word1 &= 0x0000FFFF;
5337 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5338 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5339
5340 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5341 if (rc) {
5342 dma_free_coherent(&phba->pcidev->dev, dma_size,
5343 dmabuf->virt, dmabuf->phys);
James Smartdef9c7a2009-12-21 17:02:28 -05005344 kfree(dmabuf);
James Smartda0436e2009-05-22 14:51:39 -04005345 return -EIO;
5346 }
5347
James Smartda0436e2009-05-22 14:51:39 -04005348 /*
5349 * The available vpd length cannot be bigger than the
5350 * DMA buffer passed to the port. Catch the less than
5351 * case and update the caller's size.
5352 */
5353 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5354 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5355
James Smartd7c47992010-06-08 18:31:54 -04005356 memcpy(vpd, dmabuf->virt, *vpd_size);
5357
James Smartda0436e2009-05-22 14:51:39 -04005358 dma_free_coherent(&phba->pcidev->dev, dma_size,
5359 dmabuf->virt, dmabuf->phys);
5360 kfree(dmabuf);
5361 return 0;
5362}
5363
/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a COMMON_GET_CNTL_ATTRIBUTES mailbox command and
 * records the link type, link number and BIOS version this PCI function
 * is attached to.
 *
 * Return codes
 *	0 - successful
 *	otherwise - failed to retrieve controller attributes
 **/
static int
lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	int rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	/* Non-embedded SLI4_CONFIG: payload lives in external SGE pages */
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* Response is in the first external SGE buffer */
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}

	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);

	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
		sizeof(phba->BIOSVersion));

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no,
			phba->BIOSVersion);
out_free_mboxq:
	/* NOTE(review): on MBX_TIMEOUT the mailbox is presumably still
	 * owned by the completion path, so it is deliberately not freed
	 * here - confirm against the mbox timeout handler.
	 */
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
5448
5449/**
5450 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5451 * @phba: pointer to lpfc hba data structure.
5452 *
5453 * This routine retrieves SLI4 device physical port name this PCI function
5454 * is attached to.
5455 *
5456 * Return codes
5457 * 0 - successful
5458 * otherwise - failed to retrieve physical port name
5459 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_port_name *get_port_name;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	rc = lpfc_sli4_get_ctl_attr(phba);
	if (rc)
		goto out_free_mboxq;

retrieve_ppname:
	/* Build an embedded GET_PORT_NAME (opcode version 1) request using
	 * the link type discovered above; the response carries one name
	 * character per link number.
	 */
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_PORT_NAME,
			 sizeof(struct lpfc_mbx_get_port_name) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	/* shdr aliases into the embedded mailbox; valid for request and,
	 * after lpfc_sli_issue_mbox() completes, for the response too.
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	/* Pick the single-character port name matching this link number */
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		/* Unknown link number: leave pport_name_sta at PPNAME_NON */
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	/* On MBX_TIMEOUT the firmware may still own the mailbox memory,
	 * so it must not be freed here.
	 */
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
5554
5555/**
James Smartda0436e2009-05-22 14:51:39 -04005556 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5557 * @phba: pointer to lpfc hba data structure.
5558 *
5559 * This routine is called to explicitly arm the SLI4 device's completion and
5560 * event queues
5561 **/
5562static void
5563lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5564{
James Smart895427b2017-02-12 13:52:30 -08005565 int qidx;
James Smartb71413d2018-02-22 08:18:40 -08005566 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
James Smartcdb42be2019-01-28 11:14:21 -08005567 struct lpfc_sli4_hdw_queue *qp;
James Smart657add42019-05-21 17:49:06 -07005568 struct lpfc_queue *eq;
James Smartda0436e2009-05-22 14:51:39 -04005569
James Smart32517fc2019-01-28 11:14:33 -08005570 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5571 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
James Smartb71413d2018-02-22 08:18:40 -08005572 if (sli4_hba->nvmels_cq)
James Smart32517fc2019-01-28 11:14:33 -08005573 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5574 LPFC_QUEUE_REARM);
James Smart895427b2017-02-12 13:52:30 -08005575
James Smartcdb42be2019-01-28 11:14:21 -08005576 if (sli4_hba->hdwq) {
James Smart657add42019-05-21 17:49:06 -07005577 /* Loop thru all Hardware Queues */
James Smartcdb42be2019-01-28 11:14:21 -08005578 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
James Smart657add42019-05-21 17:49:06 -07005579 qp = &sli4_hba->hdwq[qidx];
5580 /* ARM the corresponding CQ */
James Smart01f2ef62019-08-28 16:19:11 -07005581 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
James Smartc00f62e2019-08-14 16:57:11 -07005582 LPFC_QUEUE_REARM);
James Smartcdb42be2019-01-28 11:14:21 -08005583 }
James Smart895427b2017-02-12 13:52:30 -08005584
James Smart657add42019-05-21 17:49:06 -07005585 /* Loop thru all IRQ vectors */
5586 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5587 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5588 /* ARM the corresponding EQ */
5589 sli4_hba->sli4_write_eq_db(phba, eq,
5590 0, LPFC_QUEUE_REARM);
5591 }
James Smartcdb42be2019-01-28 11:14:21 -08005592 }
James Smart1ba981f2014-02-20 09:56:45 -05005593
James Smart2d7dbc42017-02-12 13:52:35 -08005594 if (phba->nvmet_support) {
5595 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
James Smart32517fc2019-01-28 11:14:33 -08005596 sli4_hba->sli4_write_cq_db(phba,
5597 sli4_hba->nvmet_cqset[qidx], 0,
James Smart2d7dbc42017-02-12 13:52:35 -08005598 LPFC_QUEUE_REARM);
5599 }
James Smart2e90f4b2011-12-13 13:22:37 -05005600 }
James Smartda0436e2009-05-22 14:51:39 -04005601}
5602
5603/**
James Smart6d368e52011-05-24 11:44:12 -04005604 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5605 * @phba: Pointer to HBA context object.
5606 * @type: The resource extent type.
James Smartb76f2dc2011-07-22 18:37:42 -04005607 * @extnt_count: buffer to hold port available extent count.
5608 * @extnt_size: buffer to hold element count per extent.
James Smart6d368e52011-05-24 11:44:12 -04005609 *
 * This function calls the port and retrieves the number of available
5611 * extents and their size for a particular extent type.
5612 *
5613 * Returns: 0 if successful. Nonzero otherwise.
James Smart6d368e52011-05-24 11:44:12 -04005614 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/* Poll the command when interrupts are not yet enabled; otherwise
	 * issue it and sleep until completion or the per-command timeout.
	 */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/* Check the SLI_CONFIG response header for a port-level failure */
	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	/* Hand back the port-reported extent count and elements-per-extent */
	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
5682
5683/**
5684 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5685 * @phba: Pointer to HBA context object.
5686 * @type: The extent type to check.
5687 *
5688 * This function reads the current available extents from the port and checks
5689 * if the extent count or extent size has changed since the last access.
5690 * Callers use this routine post port reset to understand if there is a
5691 * extent reprovisioning requirement.
5692 *
5693 * Returns:
5694 * -Error: error indicates problem.
5695 * 1: Extent count or size has changed.
5696 * 0: No changes.
5697 **/
5698static int
5699lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5700{
5701 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5702 uint16_t size_diff, rsrc_ext_size;
5703 int rc = 0;
5704 struct lpfc_rsrc_blks *rsrc_entry;
5705 struct list_head *rsrc_blk_list = NULL;
5706
5707 size_diff = 0;
5708 curr_ext_cnt = 0;
5709 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5710 &rsrc_ext_cnt,
5711 &rsrc_ext_size);
5712 if (unlikely(rc))
5713 return -EIO;
5714
5715 switch (type) {
5716 case LPFC_RSC_TYPE_FCOE_RPI:
5717 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5718 break;
5719 case LPFC_RSC_TYPE_FCOE_VPI:
5720 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5721 break;
5722 case LPFC_RSC_TYPE_FCOE_XRI:
5723 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5724 break;
5725 case LPFC_RSC_TYPE_FCOE_VFI:
5726 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5727 break;
5728 default:
5729 break;
5730 }
5731
5732 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5733 curr_ext_cnt++;
5734 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5735 size_diff++;
5736 }
5737
5738 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5739 rc = 1;
5740
5741 return rc;
5742}
5743
/**
 * lpfc_sli4_cfg_post_extnts - Issue an SLI4 resource extent allocation request
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request. It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   -Error:  Error value describes the condition found.
 *   0: if successful
 **/
5761static int
James Smart8a9d2e82012-05-09 21:16:12 -04005762lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
James Smart6d368e52011-05-24 11:44:12 -04005763 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5764{
5765 int rc = 0;
5766 uint32_t req_len;
5767 uint32_t emb_len;
5768 uint32_t alloc_len, mbox_tmo;
5769
5770 /* Calculate the total requested length of the dma memory */
James Smart8a9d2e82012-05-09 21:16:12 -04005771 req_len = extnt_cnt * sizeof(uint16_t);
James Smart6d368e52011-05-24 11:44:12 -04005772
5773 /*
5774 * Calculate the size of an embedded mailbox. The uint32_t
5775 * accounts for extents-specific word.
5776 */
5777 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5778 sizeof(uint32_t);
5779
5780 /*
5781 * Presume the allocation and response will fit into an embedded
5782 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5783 */
5784 *emb = LPFC_SLI4_MBX_EMBED;
5785 if (req_len > emb_len) {
James Smart8a9d2e82012-05-09 21:16:12 -04005786 req_len = extnt_cnt * sizeof(uint16_t) +
James Smart6d368e52011-05-24 11:44:12 -04005787 sizeof(union lpfc_sli4_cfg_shdr) +
5788 sizeof(uint32_t);
5789 *emb = LPFC_SLI4_MBX_NEMBED;
5790 }
5791
5792 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5793 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5794 req_len, *emb);
5795 if (alloc_len < req_len) {
5796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartb76f2dc2011-07-22 18:37:42 -04005797 "2982 Allocated DMA memory size (x%x) is "
James Smart6d368e52011-05-24 11:44:12 -04005798 "less than the requested DMA memory "
5799 "size (x%x)\n", alloc_len, req_len);
5800 return -ENOMEM;
5801 }
James Smart8a9d2e82012-05-09 21:16:12 -04005802 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
James Smart6d368e52011-05-24 11:44:12 -04005803 if (unlikely(rc))
5804 return -EIO;
5805
5806 if (!phba->sli4_hba.intr_enable)
5807 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5808 else {
James Smarta183a152011-10-10 21:32:43 -04005809 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005810 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5811 }
5812
5813 if (unlikely(rc))
5814 rc = -EIO;
5815 return rc;
5816}
5817
5818/**
5819 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5820 * @phba: Pointer to HBA context object.
5821 * @type: The resource extent type to allocate.
5822 *
5823 * This function allocates the number of elements for the specified
5824 * resource type.
5825 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	/* Ask the port how many extents (and what size) it can provide */
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt,
					    &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"3009 No available Resource Extents "
			"for resource type 0x%x: Count: 0x%x, "
			"Size 0x%x\n", type, rsrc_cnt,
			rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Issue the ALLOC_RSRC_EXTENT request; emb reports whether the
	 * response landed in the embedded mailbox or an external SGE.
	 */
	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	/* Total individual resource ids, and longs needed for their bitmask */
	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number.  Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode.  Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function.  The bitmask serves
	 * as an index into the array and manages the available ids.  The
	 * array just stores the ids communicated to the port via the wqes.
	 *
	 * Each response word packs two 16-bit starting ids (word4_0 /
	 * word4_1), so i indexes extents while k advances one word per
	 * two extents.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.io_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
		}

		/* Expand this extent into its individual resource ids */
		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed.  Get next word.*/
		if ((i % 2) == 1)
			k++;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
6046
James Smart895427b2017-02-12 13:52:30 -08006047
6048
James Smart6d368e52011-05-24 11:44:12 -04006049/**
6050 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6051 * @phba: Pointer to HBA context object.
6052 * @type: the extent's type.
6053 *
6054 * This function deallocates all extents of a particular resource type.
6055 * SLI4 does not allow for deallocating a particular extent range. It
6056 * is the caller's responsibility to release all kernel memory resources.
6057 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends
	 * the resource type.  All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	/* Poll when interrupts are disabled, otherwise sleep for completion */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Check the SLI_CONFIG response header for a port-level failure */
	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	/* Extent resources are no longer provisioned */
	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
6166
Baoyou Xiebd4b3e52016-09-25 13:44:55 +08006167static void
James Smart7bdedb32016-07-06 12:36:00 -07006168lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6169 uint32_t feature)
James Smart65791f12016-07-06 12:35:56 -07006170{
James Smart65791f12016-07-06 12:35:56 -07006171 uint32_t len;
James Smart65791f12016-07-06 12:35:56 -07006172
James Smart65791f12016-07-06 12:35:56 -07006173 len = sizeof(struct lpfc_mbx_set_feature) -
6174 sizeof(struct lpfc_sli4_cfg_mhdr);
6175 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6176 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6177 LPFC_SLI4_MBX_EMBED);
James Smart65791f12016-07-06 12:35:56 -07006178
James Smart7bdedb32016-07-06 12:36:00 -07006179 switch (feature) {
6180 case LPFC_SET_UE_RECOVERY:
6181 bf_set(lpfc_mbx_set_feature_UER,
6182 &mbox->u.mqe.un.set_feature, 1);
6183 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6184 mbox->u.mqe.un.set_feature.param_len = 8;
6185 break;
6186 case LPFC_SET_MDS_DIAGS:
6187 bf_set(lpfc_mbx_set_feature_mds,
6188 &mbox->u.mqe.un.set_feature, 1);
6189 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
James Smartae9e28f2017-05-15 15:20:51 -07006190 &mbox->u.mqe.un.set_feature, 1);
James Smart7bdedb32016-07-06 12:36:00 -07006191 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6192 mbox->u.mqe.un.set_feature.param_len = 8;
6193 break;
James Smart171f6c42019-11-04 16:57:07 -08006194 case LPFC_SET_DUAL_DUMP:
6195 bf_set(lpfc_mbx_set_feature_dd,
6196 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6197 bf_set(lpfc_mbx_set_feature_ddquery,
6198 &mbox->u.mqe.un.set_feature, 0);
6199 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6200 mbox->u.mqe.un.set_feature.param_len = 4;
6201 break;
James Smart65791f12016-07-06 12:35:56 -07006202 }
James Smart7bdedb32016-07-06 12:36:00 -07006203
6204 return;
James Smart65791f12016-07-06 12:35:56 -07006205}
6206
James Smart6d368e52011-05-24 11:44:12 -04006207/**
James Smart1165a5c2018-11-29 16:09:39 -08006208 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6209 * @phba: Pointer to HBA context object.
6210 *
6211 * Disable FW logging into host memory on the adapter. To
6212 * be done before reading logs from the host memory.
6213 **/
6214void
6215lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6216{
6217 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6218
James Smart95bfc6d2019-10-18 14:18:27 -07006219 spin_lock_irq(&phba->hbalock);
6220 ras_fwlog->state = INACTIVE;
6221 spin_unlock_irq(&phba->hbalock);
James Smart1165a5c2018-11-29 16:09:39 -08006222
6223 /* Disable FW logging to host memory */
6224 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6225 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
James Smart95bfc6d2019-10-18 14:18:27 -07006226
6227 /* Wait 10ms for firmware to stop using DMA buffer */
6228 usleep_range(10 * 1000, 20 * 1000);
James Smart1165a5c2018-11-29 16:09:39 -08006229}
6230
6231/**
James Smartd2cc9bc2018-09-10 10:30:50 -07006232 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6233 * @phba: Pointer to HBA context object.
6234 *
6235 * This function is called to free memory allocated for RAS FW logging
6236 * support in the driver.
6237 **/
6238void
6239lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6240{
6241 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6242 struct lpfc_dmabuf *dmabuf, *next;
6243
6244 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6245 list_for_each_entry_safe(dmabuf, next,
6246 &ras_fwlog->fwlog_buff_list,
6247 list) {
6248 list_del(&dmabuf->list);
6249 dma_free_coherent(&phba->pcidev->dev,
6250 LPFC_RAS_MAX_ENTRY_SIZE,
6251 dmabuf->virt, dmabuf->phys);
6252 kfree(dmabuf);
6253 }
6254 }
6255
6256 if (ras_fwlog->lwpd.virt) {
6257 dma_free_coherent(&phba->pcidev->dev,
6258 sizeof(uint32_t) * 2,
6259 ras_fwlog->lwpd.virt,
6260 ras_fwlog->lwpd.phys);
6261 ras_fwlog->lwpd.virt = NULL;
6262 }
6263
James Smart95bfc6d2019-10-18 14:18:27 -07006264 spin_lock_irq(&phba->hbalock);
6265 ras_fwlog->state = INACTIVE;
6266 spin_unlock_irq(&phba->hbalock);
James Smartd2cc9bc2018-09-10 10:30:50 -07006267}
6268
6269/**
6270 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6271 * @phba: Pointer to HBA context object.
6272 * @fwlog_buff_count: Count of buffers to be created.
6273 *
6274 * This routine DMA memory for Log Write Position Data[LPWD] and buffer
6275 * to update FW log is posted to the adapter.
6276 * Buffer count is calculated based on module param ras_fwlog_buffsize
6277 * Size of each buffer posted to FW is 64K.
6278 **/
6279
6280static int
6281lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6282 uint32_t fwlog_buff_count)
6283{
6284 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6285 struct lpfc_dmabuf *dmabuf;
6286 int rc = 0, i = 0;
6287
6288 /* Initialize List */
6289 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6290
6291 /* Allocate memory for the LWPD */
6292 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6293 sizeof(uint32_t) * 2,
6294 &ras_fwlog->lwpd.phys,
6295 GFP_KERNEL);
6296 if (!ras_fwlog->lwpd.virt) {
James Smartcb349902018-11-29 16:09:27 -08006297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartd2cc9bc2018-09-10 10:30:50 -07006298 "6185 LWPD Memory Alloc Failed\n");
6299
6300 return -ENOMEM;
6301 }
6302
6303 ras_fwlog->fw_buffcount = fwlog_buff_count;
6304 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6305 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6306 GFP_KERNEL);
6307 if (!dmabuf) {
6308 rc = -ENOMEM;
6309 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6310 "6186 Memory Alloc failed FW logging");
6311 goto free_mem;
6312 }
6313
Luis Chamberlain750afb02019-01-04 09:23:09 +01006314 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
James Smartd2cc9bc2018-09-10 10:30:50 -07006315 LPFC_RAS_MAX_ENTRY_SIZE,
Luis Chamberlain750afb02019-01-04 09:23:09 +01006316 &dmabuf->phys, GFP_KERNEL);
James Smartd2cc9bc2018-09-10 10:30:50 -07006317 if (!dmabuf->virt) {
6318 kfree(dmabuf);
6319 rc = -ENOMEM;
6320 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6321 "6187 DMA Alloc Failed FW logging");
6322 goto free_mem;
6323 }
James Smartd2cc9bc2018-09-10 10:30:50 -07006324 dmabuf->buffer_tag = i;
6325 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6326 }
6327
6328free_mem:
6329 if (rc)
6330 lpfc_sli4_ras_dma_free(phba);
6331
6332 return rc;
6333}
6334
/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for driver's RAS MBX command to the device.  On a
 * successful completion the RAS logging state is set ACTIVE; on any
 * mailbox or config-header error the FW-logging DMA memory is freed and
 * hardware RAS support is flagged off.  The mailbox element is returned
 * to the pool on both paths.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	mb = &pmb->u.mb;

	/* Pull the SLI4 config header status words out of the response */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"6188 FW LOG mailbox "
				"completed with status x%x add_status x%x,"
				" mbx status x%x\n",
				shdr_status, shdr_add_status, mb->mbxStatus);

		ras_fwlog->ras_hwsupport = false;
		goto disable_ras;
	}

	/* Mailbox accepted - firmware logging is now active */
	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;

disable_ras:
	/* Free RAS DMA memory */
	lpfc_sli4_ras_dma_free(phba);
	mempool_free(pmb, phba->mbox_mem_pool);
}
6380
/**
 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory.  DMA buffers are reused across enable/disable cycles: new ones
 * are allocated only when the LWPD block is not already present.  The
 * mailbox completes asynchronously in lpfc_sli4_ras_mbox_cmpl(), which
 * moves the RAS state from REG_INPROGRESS to ACTIVE.
 *
 * Return: 0 when the mailbox was successfully issued; -ENOMEM or -EIO on
 * failure, in which case all RAS DMA memory is freed before returning.
 **/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
			 uint32_t fwlog_level,
			 uint32_t fwlog_enable)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
	int rc = 0;

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Total log size is a module-param multiple of the minimum post
	 * size; each buffer posted to the FW is LPFC_RAS_MAX_ENTRY_SIZE.
	 */
	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
			  phba->cfg_ras_fwlog_buffsize);
	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);

	/*
	 * If re-enabling FW logging support use earlier allocated
	 * DMA buffers while posting MBX command.
	 */
	if (!ras_fwlog->lwpd.virt) {
		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed");
			return rc;
		}
	}

	/* Setup Mailbox command */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6190 RAS MBX Alloc Failed");
		rc = -ENOMEM;
		goto mem_free;
	}

	ras_fwlog->fw_loglevel = fwlog_level;
	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
		sizeof(struct lpfc_sli4_cfg_mhdr));

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
			 len, LPFC_SLI4_MBX_EMBED);

	/* Fill in the request bit-fields: enable flag, verbosity, buffer
	 * count and per-buffer size in SLI4 pages.
	 */
	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
	       fwlog_enable);
	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
	       ras_fwlog->fw_loglevel);
	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
	       ras_fwlog->fw_buffcount);
	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);

	/* Update DMA buffer address */
	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}

	/* Update LPWD address */
	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

	/* Registration is in flight until the completion handler runs */
	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = REG_INPROGRESS;
	spin_unlock_irq(&phba->hbalock);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus : x%x", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	} else
		rc = 0;
mem_free:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}
6489
6490/**
6491 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6492 * @phba: Pointer to HBA context object.
6493 *
6494 * Check if RAS is supported on the adapter and initialize it.
6495 **/
6496void
6497lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6498{
6499 /* Check RAS FW Log needs to be enabled or not */
6500 if (lpfc_check_fwlog_support(phba))
6501 return;
6502
6503 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6504 LPFC_RAS_ENABLE_LOGGING);
6505}
6506
/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers (RPI, VPI, XRI,
 * VFI).  When the port supports resource extents, the per-type extent
 * counts are validated/allocated through the port.  Otherwise, the id
 * ranges reported by READ_CONFIG are provisioned into locally allocated
 * bitmasks and id arrays; on any allocation failure the goto chain below
 * unwinds everything allocated so far.
 *
 * Return: 0 on success; -ENOMEM, -EINVAL or an extent-allocation error
 * code on failure.
 **/
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets. Detect this condition and reallocate
			 * resources. Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents. The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays. If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		/* NOTE(review): count is uint16_t, so "count <= 0" can only
		 * catch a zero provisioning value - negatives are impossible.
		 * Same applies to the VPI/XRI/VFI checks below.
		 */
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready. An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

	/* Error unwind: free in reverse order of allocation, NULLing each
	 * pointer so a later dealloc pass does not double-free.
	 */
 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
6752
/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function releases all SLI4 resource identifiers (VPI, RPI, XRI,
 * VFI).  When the port uses resource extents they are returned through
 * the port; otherwise the locally allocated id bitmasks/arrays are freed
 * and the resource-ready flags cleared.
 *
 * Return: always 0.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}
6783
6784/**
James Smartb76f2dc2011-07-22 18:37:42 -04006785 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6786 * @phba: Pointer to HBA context object.
6787 * @type: The resource extent type.
6788 * @extnt_count: buffer to hold port extent count response
6789 * @extnt_size: buffer to hold port extent size response.
6790 *
6791 * This function calls the port to read the host allocated extents
6792 * for a particular type.
6793 **/
6794int
6795lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6796 uint16_t *extnt_cnt, uint16_t *extnt_size)
6797{
6798 bool emb;
6799 int rc = 0;
6800 uint16_t curr_blks = 0;
6801 uint32_t req_len, emb_len;
6802 uint32_t alloc_len, mbox_tmo;
6803 struct list_head *blk_list_head;
6804 struct lpfc_rsrc_blks *rsrc_blk;
6805 LPFC_MBOXQ_t *mbox;
6806 void *virtaddr = NULL;
6807 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6808 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6809 union lpfc_sli4_cfg_shdr *shdr;
6810
6811 switch (type) {
6812 case LPFC_RSC_TYPE_FCOE_VPI:
6813 blk_list_head = &phba->lpfc_vpi_blk_list;
6814 break;
6815 case LPFC_RSC_TYPE_FCOE_XRI:
6816 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6817 break;
6818 case LPFC_RSC_TYPE_FCOE_VFI:
6819 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6820 break;
6821 case LPFC_RSC_TYPE_FCOE_RPI:
6822 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6823 break;
6824 default:
6825 return -EIO;
6826 }
6827
6828 /* Count the number of extents currently allocatd for this type. */
6829 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6830 if (curr_blks == 0) {
6831 /*
6832 * The GET_ALLOCATED mailbox does not return the size,
6833 * just the count. The size should be just the size
6834 * stored in the current allocated block and all sizes
6835 * for an extent type are the same so set the return
6836 * value now.
6837 */
6838 *extnt_size = rsrc_blk->rsrc_size;
6839 }
6840 curr_blks++;
6841 }
6842
James Smartb76f2dc2011-07-22 18:37:42 -04006843 /*
6844 * Calculate the size of an embedded mailbox. The uint32_t
6845 * accounts for extents-specific word.
6846 */
6847 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6848 sizeof(uint32_t);
6849
6850 /*
6851 * Presume the allocation and response will fit into an embedded
6852 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6853 */
6854 emb = LPFC_SLI4_MBX_EMBED;
6855 req_len = emb_len;
6856 if (req_len > emb_len) {
6857 req_len = curr_blks * sizeof(uint16_t) +
6858 sizeof(union lpfc_sli4_cfg_shdr) +
6859 sizeof(uint32_t);
6860 emb = LPFC_SLI4_MBX_NEMBED;
6861 }
6862
6863 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6864 if (!mbox)
6865 return -ENOMEM;
6866 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6867
6868 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6869 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6870 req_len, emb);
6871 if (alloc_len < req_len) {
6872 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6873 "2983 Allocated DMA memory size (x%x) is "
6874 "less than the requested DMA memory "
6875 "size (x%x)\n", alloc_len, req_len);
6876 rc = -ENOMEM;
6877 goto err_exit;
6878 }
6879 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6880 if (unlikely(rc)) {
6881 rc = -EIO;
6882 goto err_exit;
6883 }
6884
6885 if (!phba->sli4_hba.intr_enable)
6886 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6887 else {
James Smarta183a152011-10-10 21:32:43 -04006888 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smartb76f2dc2011-07-22 18:37:42 -04006889 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6890 }
6891
6892 if (unlikely(rc)) {
6893 rc = -EIO;
6894 goto err_exit;
6895 }
6896
6897 /*
6898 * Figure out where the response is located. Then get local pointers
6899 * to the response data. The port does not guarantee to respond to
6900 * all extents counts request so update the local variable with the
6901 * allocated count from the port.
6902 */
6903 if (emb == LPFC_SLI4_MBX_EMBED) {
6904 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6905 shdr = &rsrc_ext->header.cfg_shdr;
6906 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6907 } else {
6908 virtaddr = mbox->sge_array->addr[0];
6909 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6910 shdr = &n_rsrc->cfg_shdr;
6911 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6912 }
6913
6914 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6915 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6916 "2984 Failed to read allocated resources "
6917 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6918 type,
6919 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6920 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6921 rc = -EIO;
6922 goto err_exit;
6923 }
6924 err_exit:
6925 lpfc_sli4_mbox_cmd_free(phba, mbox);
6926 return rc;
6927}
6928
/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked link of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contains contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For single
 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
 * mailbox command for posting.
 *
 * Returns: the number of XRIs actually posted on success (sgls that failed
 * to post are freed and excluded from the count); -EIO when no sgl at all
 * could be posted.
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	/* Take the whole input list under the sgl list lock */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				/* last block not full - post what remains */
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* lone trailing sgl - embedded single post */
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xirtag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset sgl post count for next round of posting */
		post_cnt = 0;
	}

	/* free the sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post sgl to port.\n");
		return -EIO;
	}

	/* return the number of XRIs actually posted */
	return total_cnt;
}
7078
James Smart0794d602019-01-28 11:14:19 -08007079/**
James Smart5e5b5112019-01-28 11:14:22 -08007080 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
James Smart0794d602019-01-28 11:14:19 -08007081 * @phba: pointer to lpfc hba data structure.
7082 *
7083 * This routine walks the list of nvme buffers that have been allocated and
7084 * repost them to the port by using SGL block post. This is needed after a
7085 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7086 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
James Smart5e5b5112019-01-28 11:14:22 -08007087 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
James Smart0794d602019-01-28 11:14:19 -08007088 *
7089 * Returns: 0 = success, non-zero failure.
7090 **/
Bart Van Assche3999df72019-03-28 11:06:16 -07007091static int
James Smart5e5b5112019-01-28 11:14:22 -08007092lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
James Smart0794d602019-01-28 11:14:19 -08007093{
7094 LIST_HEAD(post_nblist);
7095 int num_posted, rc = 0;
7096
7097 /* get all NVME buffers need to repost to a local list */
James Smart5e5b5112019-01-28 11:14:22 -08007098 lpfc_io_buf_flush(phba, &post_nblist);
James Smart0794d602019-01-28 11:14:19 -08007099
7100 /* post the list of nvme buffer sgls to port if available */
7101 if (!list_empty(&post_nblist)) {
James Smart5e5b5112019-01-28 11:14:22 -08007102 num_posted = lpfc_sli4_post_io_sgl_list(
7103 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
James Smart0794d602019-01-28 11:14:19 -08007104 /* failed to post any nvme buffer, return error */
7105 if (num_posted == 0)
7106 rc = -EIO;
7107 }
7108 return rc;
7109}
7110
Bart Van Assche3999df72019-03-28 11:06:16 -07007111static void
James Smart61bda8f2016-10-13 15:06:05 -07007112lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7113{
7114 uint32_t len;
7115
7116 len = sizeof(struct lpfc_mbx_set_host_data) -
7117 sizeof(struct lpfc_sli4_cfg_mhdr);
7118 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7119 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7120 LPFC_SLI4_MBX_EMBED);
7121
7122 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
James Smartb2fd1032016-12-19 15:07:21 -08007123 mbox->u.mqe.un.set_host_data.param_len =
7124 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
James Smart61bda8f2016-10-13 15:06:05 -07007125 snprintf(mbox->u.mqe.un.set_host_data.data,
7126 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7127 "Linux %s v"LPFC_DRIVER_VERSION,
7128 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7129}
7130
/**
 * lpfc_post_rq_buffer - Allocate and post buffers to a header/data RQ pair
 * @phba: pointer to lpfc hba data structure.
 * @hrq: header receive queue to post to.
 * @drq: companion data receive queue posted in lockstep with @hrq.
 * @count: maximum number of buffers to allocate and post.
 * @idx: queue index recorded in each posted buffer.
 *
 * Allocates up to @count buffers via the RQ's rqb_alloc_buffer callback and
 * posts each one's header/data DMA addresses to the hardware RQ pair. Stops
 * early if the RQ would overflow or allocation fails. Buffers the hardware
 * rejects are freed back via rqb_free_buffer; accepted ones are tracked on
 * the rqbp buffer list. The whole sequence runs under phba->hbalock with
 * interrupts disabled.
 *
 * Return: always 1 (callers do not distinguish partial posts).
 **/
int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		    struct lpfc_queue *drq, int count, int idx)
{
	int rc, i;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_rqb *rqbp;
	unsigned long flags;
	struct rqb_dmabuf *rqb_buffer;
	LIST_HEAD(rqb_buf_list);

	spin_lock_irqsave(&phba->hbalock, flags);
	rqbp = hrq->rqbp;
	/* Phase 1: allocate buffers onto a private list. */
	for (i = 0; i < count; i++) {
		/* IF RQ is already full, don't bother */
		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
			break;
		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
		if (!rqb_buffer)
			break;
		rqb_buffer->hrq = hrq;
		rqb_buffer->drq = drq;
		rqb_buffer->idx = idx;
		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
	}
	/* Phase 2: hand each buffer's DMA addresses to the RQ pair. */
	while (!list_empty(&rqb_buf_list)) {
		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
				 hbuf.list);

		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
		if (rc < 0) {
			/* Port rejected the entry - release the buffer. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6421 Cannot post to HRQ %d: %x %x %x "
					"DRQ %x %x\n",
					hrq->queue_id,
					hrq->host_index,
					hrq->hba_index,
					hrq->entry_count,
					drq->host_index,
					drq->hba_index);
			rqbp->rqb_free_buffer(phba, rqb_buffer);
		} else {
			list_add_tail(&rqb_buffer->hbuf.list,
				      &rqbp->rqb_buffer_list);
			rqbp->buffer_count++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}
7186
/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 *
 * The sequence is strictly ordered: PCI function reset, READ_REV/VPD,
 * feature negotiation, resource identifier allocation, queue creation and
 * setup, sgl/iocb repost, FCFI registration, buffer allocation, timers and
 * finally interrupt arming. Each later goto label unwinds everything the
 * earlier stages set up.
 *
 * Return: 0 on success; a negative errno, or a nonzero mailbox/link-init
 * status, on failure.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt, len, dd;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readyness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.  It is reused for every polled mailbox command below.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		phba->hba_flag |= HBA_FCOE_MODE;
		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
	} else {
		phba->hba_flag &= ~HBA_FCOE_MODE;
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_IOQ_FLUSH;

	/* This routine only supports SLI4 rev ports. */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions, only read parameters if the
	 * board is FCoE
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name, failure of doing it
	 * is considered as non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	/* Controller attributes are informational; failure is non-fatal. */
	rc = lpfc_sli4_get_ctl_attr(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"8351 Successful retrieving SLI4 device "
				"CTL ATTR\n");

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;

	/*
	 * This is because first G7 ASIC doesn't support the standard
	 * 0x5a NVME cmd descriptor type/subtype
	 */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
			LPFC_SLI_INTF_IF_TYPE_6) &&
	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
	    (phba->vpd.rev.smRev == 0) &&
	    (phba->cfg_nvme_embed_cmd == 1))
		phba->cfg_nvme_embed_cmd = 0;

	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Unrecoverable-error recovery is an if_type 0 only feature. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			phba->hba_flag |= HBA_RECOVERABLE_UE;
			/* Set 1Sec interval to detect UE */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS Diagnostics only if the SLI Port supports it */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * hosts requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance Hints are ONLY for FCoE */
	if (phba->hba_flag & HBA_FCOE_MODE) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver. This is not a fatal error.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
			phba->cfg_enable_bg = 0;
			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
			ftr_rsp++;
		}
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Always try to enable dual dump feature if we can */
	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
				"6448 Dual Dump is enabled\n");
	else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x dd:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(
					phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(
					phba, mboxq),
				rc, dd);
	/*
	 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
	 * calls depends on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	/* Report the host OS / driver version; failure is non-fatal. */
	lpfc_set_host_data(phba, mboxq);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version %x",
				rc);
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->ctx_buf = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_free_mbox;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		goto out_stop_timers;
	}
	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli4_setup(phba);
	lpfc_sli4_queue_init(phba);

	/* update host els xri-sgl sizes and mappings */
	rc = lpfc_sli4_els_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_destroy_queue;
	}

	/* register the els sgl pool to the port */
	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
				       phba->sli4_hba.els_xri_cnt);
	if (unlikely(rc < 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	/* On success the repost routine returns the posted count. */
	phba->sli4_hba.els_xri_cnt = rc;

	if (phba->nvmet_support) {
		/* update host nvmet xri-sgl sizes and mappings */
		rc = lpfc_sli4_nvmet_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6308 Failed to update nvmet-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the nvmet sgl pool to the port */
		rc = lpfc_sli4_repost_sgl_list(
			phba,
			&phba->sli4_hba.lpfc_nvmet_sgl_list,
			phba->sli4_hba.nvmet_xri_cnt);
		if (unlikely(rc < 0)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"3117 Error %d during nvmet "
					"sgl post\n", rc);
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		phba->sli4_hba.nvmet_xri_cnt = rc;

		/* We allocate an iocbq for every receive context SGL.
		 * The additional allocation is for abort and ls handling.
		 */
		cnt = phba->sli4_hba.nvmet_xri_cnt +
			phba->sli4_hba.max_cfg_param.max_xri;
	} else {
		/* update host common xri-sgl sizes and mappings */
		rc = lpfc_sli4_io_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6082 Failed to update nvme-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the allocated common sgl pool to the port */
		rc = lpfc_sli4_repost_io_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6116 Error %d during nvme sgl post "
					"operation\n", rc);
			/* Some NVME buffers were moved to abort nvme list */
			/* A pci function reset will repost them */
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		/* Each lpfc_io_buf job structure has an iocbq element.
		 * This cnt provides for abort, els, ct and ls requests.
		 */
		cnt = phba->sli4_hba.max_cfg_param.max_xri;
	}

	if (!phba->sli.iocbq_lookup) {
		/* Initialize and populate the iocb list per host */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2821 initialize iocb list with %d entries\n",
				cnt);
		rc = lpfc_init_iocb_list(phba, cnt);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1413 Failed to init iocb list.\n");
			goto out_destroy_queue;
		}
	}

	if (phba->nvmet_support)
		lpfc_nvmet_create_targetport(phba);

	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
		/* Post initial buffers to all RQs created */
		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
			rqbp->buffer_count = 0;

			lpfc_post_rq_buffer(
				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
				phba->sli4_hba.nvmet_mrq_data[i],
				phba->cfg_nvmet_mrq_post, i);
		}
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	lpfc_sli4_node_prep(phba);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
			/*
			 * The FC Port needs to register FCFI (index 0)
			 */
			lpfc_reg_fcfi(phba, mboxq);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
						&mboxq->u.mqe.un.reg_fcfi);
		} else {
			/* We are a NVME Target mode with MRQ > 1 */

			/* First register the FCFI */
			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
						&mboxq->u.mqe.un.reg_fcfi_mrq);

			/* Next register the MRQs */
			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
		}
		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/* Don't post more new bufs if repost already recovered
	 * the nvme sgls.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->sli4_hba.io_xri_cnt == 0) {
			len = lpfc_new_io_buf(
					      phba, phba->sli4_hba.io_xri_max);
			if (len == 0) {
				rc = -ENOMEM;
				goto out_unset_queue;
			}

			if (phba->cfg_xri_rebalancing)
				lpfc_create_multixri_pools(phba);
		}
	} else {
		phba->cfg_xri_rebalancing = 0;
	}

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* start eq_delay heartbeat */
	if (phba->cfg_auto_imax)
		queue_delayed_work(phba->wq, &phba->eq_delay_work,
				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		/* AER failure is not fatal; clear rc for the success path. */
		rc = 0;
	}

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;

	/* Check if physical ports are trunked */
	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_io_buff_free;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_io_buff_free;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_io_buff_free:
	/* Free allocated IO Buffers */
	lpfc_io_free(phba);
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_free_iocb_list(phba);
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
James Smarte59058c2008-08-24 21:49:00 -04007826
/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @t: timer context; used to obtain the enclosing hba structure via
 *     from_timer() (the timer is embedded at phba->sli.mbox_tmo).
 *
 * This is the callback function for mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox complete. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
	unsigned long iflag;
	uint32_t tmo_posted;

	/* Post WORKER_MBOX_TMO only if it is not already pending; tmo_posted
	 * remembers the prior state so the worker is woken at most once.
	 */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Wake-up happens outside the lock; only the poster wakes the worker */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
7856
James Smarte8d3c3b2013-10-10 12:21:30 -04007857/**
7858 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7859 * are pending
7860 * @phba: Pointer to HBA context object.
7861 *
7862 * This function checks if any mailbox completions are present on the mailbox
7863 * completion queue.
7864 **/
Nicholas Krause3bb11fc2015-08-31 16:48:13 -04007865static bool
James Smarte8d3c3b2013-10-10 12:21:30 -04007866lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7867{
7868
7869 uint32_t idx;
7870 struct lpfc_queue *mcq;
7871 struct lpfc_mcqe *mcqe;
7872 bool pending_completions = false;
James Smart7365f6f2018-02-22 08:18:46 -08007873 uint8_t qe_valid;
James Smarte8d3c3b2013-10-10 12:21:30 -04007874
7875 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7876 return false;
7877
7878 /* Check for completions on mailbox completion queue */
7879
7880 mcq = phba->sli4_hba.mbx_cq;
7881 idx = mcq->hba_index;
James Smart7365f6f2018-02-22 08:18:46 -08007882 qe_valid = mcq->qe_valid;
James Smart9afbee32019-03-12 16:30:28 -07007883 while (bf_get_le32(lpfc_cqe_valid,
7884 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7885 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
James Smarte8d3c3b2013-10-10 12:21:30 -04007886 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7887 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7888 pending_completions = true;
7889 break;
7890 }
7891 idx = (idx + 1) % mcq->entry_count;
7892 if (mcq->hba_index == idx)
7893 break;
James Smart7365f6f2018-02-22 08:18:46 -08007894
7895 /* if the index wrapped around, toggle the valid bit */
7896 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7897 qe_valid = (qe_valid) ? 0 : 1;
James Smarte8d3c3b2013-10-10 12:21:30 -04007898 }
7899 return pending_completions;
7900
7901}
7902
/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *					       that were missed.
 * @phba: Pointer to HBA context object.
 *
 * For sli4, it is possible to miss an interrupt. As such mbox completions
 * maybe missed causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 *
 * Returns true if a mailbox completion was pending (and the EQ was
 * processed), false otherwise.
 **/
static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_queue *eq;
	bool mbox_pending;

	/* SLI-4 only; nothing to recover on earlier SLI revisions */
	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the EQ associated with the mbox CQ */
	if (sli4_hba->hdwq) {
		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
			/* match on the EQ id the mailbox CQ is attached to */
			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
				fpeq = eq;
				break;
			}
		}
	}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		/* process and rearm the EQ */
		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
	else
		/* Always clear and re-arm the EQ */
		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);

	return mbox_pending;

}
James Smarte59058c2008-08-24 21:49:00 -04007964
/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout; mb is non-NULL here since pmbox is */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Fail all outstanding FCP I/O before resetting the adapter */
	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
8031
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object, or NULL to pull the next queued command
 *         (used when draining the pending queue from the interrupt handler).
 * @flag: Flag indicating how the mailbox need to be processed (MBX_POLL or
 *        MBX_NOWAIT).
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and returns immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 * The sli layer owns the mailbox object until the completion of mailbox
 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 *
 * Return: MBX_SUCCESS or MBX_BUSY on successful submission, the firmware
 * mailbox status for polled commands, or MBX_NOT_FINISHED on failure.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			/* pending queue empty - nothing to issue */
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	/* Commands with a private completion handler must carry a vport */
	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if(!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* NOWAIT needs the mailbox interrupt enabled (except for KILL_BOARD,
	 * which must go out even when interrupts are disabled).
	 */
	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
			!(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		/* SLI layer keeps ownership of pmbox (now on pending queue) */
		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send:       cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* SLI2/SLI3 host-memory mailbox path */
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* SLI1 path: command goes to adapter SLIM over PCI */
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
				MAILBOX_HBA_EXT_OFFSET,
				pmbox->ctx_buf, pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set; writing this
		 * word last hands the command to the adapter atomically */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
							1000) + jiffies;
		i = 0;
		/* Wait for command to complete; poll under hbalock, but after
		 * ~10 fast iterations drop the lock and sleep 1ms per loop. */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
						MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->ctx_buf,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
						MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_memcpy_from_slim(
					pmbox->ctx_buf,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		/* Acknowledge the mailbox attention event */
		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	/* hbalock already released on every path that jumps here */
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
8450
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active (recover a possibly
	 * missed completion interrupt before waiting on it).
	 */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, marked the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fails it:
	 * roll back the block flag so posting resumes.
	 */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
8505
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout, after timing-out the outstanding mailbox
	 * command shall always be removed, so just unblock posting async
	 * mailbox command and resume
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
8540
8541/**
James Smart2d843ed2012-09-29 11:29:06 -04008542 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8543 * @phba: Pointer to HBA context object.
8544 * @mboxq: Pointer to mailbox object.
8545 *
8546 * The function waits for the bootstrap mailbox register ready bit from
8547 * port for twice the regular mailbox command timeout value.
8548 *
8549 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8550 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8551 **/
8552static int
8553lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8554{
8555 uint32_t db_ready;
8556 unsigned long timeout;
8557 struct lpfc_register bmbx_reg;
8558
8559 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8560 * 1000) + jiffies;
8561
8562 do {
8563 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8564 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8565 if (!db_ready)
James Smarte2ffe4d2019-03-12 16:30:15 -07008566 mdelay(2);
James Smart2d843ed2012-09-29 11:29:06 -04008567
8568 if (time_after(jiffies, timeout))
8569 return MBXERR_ERROR;
8570 } while (!db_ready);
8571
8572 return 0;
8573}
8574
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register for readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port.  Writing the low
	 * address kicks off execution of the posted mailbox command.
	 */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no need for lock when releasing it */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
8705
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox need to be processed
 *	  (MBX_POLL for synchronous, otherwise asynchronous).
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	/* Reject the command outright if the device is not ready */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler.  With interrupts
	 * disabled only MBX_POLL (bootstrap mailbox) can be serviced.
	 */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			/* NOTE(review): stray ',' after "\n" inside this
			 * format string looks like a typo; left as-is since
			 * changing log output is a behavior change.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n,",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
					mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
					mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					&mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n,",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
					mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
					mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					&mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO; the worker
	 * thread (lpfc_sli4_post_async_mbox) posts it to the port later.
	 */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
8841
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by worker thread to send a mailbox command to
 * SLI4 HBA firmware.  It takes the next command from the driver's pending
 * mailbox FIFO and posts it to the mailbox work queue.
 *
 * Returns:
 *	MBX_SUCCESS - command posted (or queue was empty).
 *	MBX_NOT_FINISHED - posting blocked, token busy, or post failed.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Async posting is currently blocked (sync cmd in flight) */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Another mailbox command already owns the token */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	/* Trace all commands except the high-frequency heartbeat */
	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Only complete/release if we still own the command; it may have
	 * been cleaned up concurrently (mbox_active cleared elsewhere).
	 */
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
8963
8964/**
8965 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8966 * @phba: Pointer to HBA context object.
8967 * @pmbox: Pointer to mailbox object.
8968 * @flag: Flag indicating how the mailbox need to be processed.
8969 *
8970 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8971 * the API jump table function pointer from the lpfc_hba struct.
8972 *
8973 * Return codes the caller owns the mailbox command after the return of the
8974 * function.
8975 **/
8976int
8977lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8978{
8979 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8980}
8981
8982/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008983 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
James Smartda0436e2009-05-22 14:51:39 -04008984 * @phba: The hba struct for which this call is being executed.
8985 * @dev_grp: The HBA PCI-Device group number.
8986 *
8987 * This routine sets up the mbox interface API function jump table in @phba
8988 * struct.
8989 * Returns: 0 - success, -ENODEV - failure.
8990 **/
8991int
8992lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8993{
8994
8995 switch (dev_grp) {
8996 case LPFC_PCI_DEV_LP:
8997 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8998 phba->lpfc_sli_handle_slow_ring_event =
8999 lpfc_sli_handle_slow_ring_event_s3;
9000 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9001 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9002 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9003 break;
9004 case LPFC_PCI_DEV_OC:
9005 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9006 phba->lpfc_sli_handle_slow_ring_event =
9007 lpfc_sli_handle_slow_ring_event_s4;
9008 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9009 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9010 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9011 break;
9012 default:
9013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9014 "1420 Invalid HBA PCI-device group: 0x%x\n",
9015 dev_grp);
9016 return -ENODEV;
9017 break;
9018 }
9019 return 0;
9020}
9021
9022/**
James Smart3621a712009-04-06 18:47:14 -04009023 * __lpfc_sli_ringtx_put - Add an iocb to the txq
James Smarte59058c2008-08-24 21:49:00 -04009024 * @phba: Pointer to HBA context object.
9025 * @pring: Pointer to driver SLI ring object.
9026 * @piocb: Pointer to address of newly added command iocb.
9027 *
James Smart27f3efd2019-10-18 14:18:19 -07009028 * This function is called with hbalock held for SLI3 ports or
9029 * the ring lock held for SLI4 ports to add a command
James Smarte59058c2008-08-24 21:49:00 -04009030 * iocb to the txq when SLI layer cannot submit the command iocb
9031 * to the ring.
9032 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04009033void
James Smart92d7f7b2007-06-17 19:56:38 -05009034__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05009035 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05009036{
James Smart27f3efd2019-10-18 14:18:19 -07009037 if (phba->sli_rev == LPFC_SLI_REV4)
9038 lockdep_assert_held(&pring->ring_lock);
9039 else
9040 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05009041 /* Insert the caller's iocb in the txq tail for later processing. */
9042 list_add_tail(&piocb->list, &pring->txq);
dea31012005-04-17 16:05:31 -05009043}
9044
James Smarte59058c2008-08-24 21:49:00 -04009045/**
James Smart3621a712009-04-06 18:47:14 -04009046 * lpfc_sli_next_iocb - Get the next iocb in the txq
James Smarte59058c2008-08-24 21:49:00 -04009047 * @phba: Pointer to HBA context object.
9048 * @pring: Pointer to driver SLI ring object.
9049 * @piocb: Pointer to address of newly added command iocb.
9050 *
9051 * This function is called with hbalock held before a new
9052 * iocb is submitted to the firmware. This function checks
9053 * txq to flush the iocbs in txq to Firmware before
9054 * submitting new iocbs to the Firmware.
9055 * If there are iocbs in the txq which need to be submitted
9056 * to firmware, lpfc_sli_next_iocb returns the first element
9057 * of the txq after dequeuing it from txq.
9058 * If there is no iocb in the txq then the function will return
9059 * *piocb and *piocb is set to NULL. Caller needs to check
9060 * *piocb to find if there are more commands in the txq.
9061 **/
dea31012005-04-17 16:05:31 -05009062static struct lpfc_iocbq *
9063lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05009064 struct lpfc_iocbq **piocb)
dea31012005-04-17 16:05:31 -05009065{
9066 struct lpfc_iocbq * nextiocb;
9067
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01009068 lockdep_assert_held(&phba->hbalock);
9069
dea31012005-04-17 16:05:31 -05009070 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9071 if (!nextiocb) {
9072 nextiocb = *piocb;
9073 *piocb = NULL;
9074 }
9075
9076 return nextiocb;
9077}
9078
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * next available slot in the command ring and posts the command to the
 * available slot and writes the port attention register to request HBA start
 * processing new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submit the iocb to firmware or after adding to the
 * txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	/* An iocb with a completion handler must carry a vport, except for
	 * abort/close which are handled without one.
	 */
	if (piocb->iocb_cmpl && (!piocb->vport) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			/* GEN requests are allowed link-down only for Menlo
			 * maintenance-mode unsolicited commands.
			 */
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	/* Drain txq (then the caller's iocb) into free ring slots */
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	/* Tell the port new work arrived; a NULL slot means the ring
	 * filled up, so signal the full-ring condition instead.
	 */
	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	/* piocb consumed by lpfc_sli_next_iocb: everything was submitted */
	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	/* Ring busy: defer onto the txq unless the caller wants it back */
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
9212
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianess so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	/* Without both an iocb and a sgl there is nothing to convert. */
	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	/* BLS responses carry no payload BDE/BPL; the xri alone suffices. */
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		/* BPL case: bdeSize is the total byte length of the BDE list */
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			/* word2 lives in the sgl in little endian; bring it
			 * to cpu order so bf_set can modify it, then swap it
			 * back below once all fields are set.
			 */
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			/* restore word2 to little endian for the hardware */
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* Single-BDE case: build exactly one sge from the embedded
		 * BDE in the IOCB itself.
		 * The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	/* NOTE(review): an IOCB with neither BLP_64 nor BDE_64 flags falls
	 * through and still returns the sgl's xri — presumably such commands
	 * carry no payload; confirm against callers.
	 */
	return sglq->sli4_xritag;
}
9320
9321/**
James Smart4f774512009-05-22 14:52:35 -04009322 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9323 * @phba: Pointer to HBA context object.
9324 * @piocb: Pointer to command iocb.
9325 * @wqe: Pointer to the work queue entry.
9326 *
9327 * This routine converts the iocb command to its Work Queue Entry
9328 * equivalent. The wqe pointer should not have any fields set when
9329 * this routine is called because it will memcpy over them.
9330 * This routine does not set the CQ_ID or the WQEC bits in the
9331 * wqe.
9332 *
9333 * Returns: 0 = Success, IOCB_ERROR = Failure.
9334 **/
9335static int
9336lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
James Smart205e8242018-03-05 12:04:03 -08009337 union lpfc_wqe128 *wqe)
James Smart4f774512009-05-22 14:52:35 -04009338{
James Smart5ffc2662009-11-18 15:39:44 -05009339 uint32_t xmit_len = 0, total_len = 0;
James Smart4f774512009-05-22 14:52:35 -04009340 uint8_t ct = 0;
9341 uint32_t fip;
9342 uint32_t abort_tag;
9343 uint8_t command_type = ELS_COMMAND_NON_FIP;
9344 uint8_t cmnd;
9345 uint16_t xritag;
James Smartdcf2a4e2010-09-29 11:18:53 -04009346 uint16_t abrt_iotag;
9347 struct lpfc_iocbq *abrtiocbq;
James Smart4f774512009-05-22 14:52:35 -04009348 struct ulp_bde64 *bpl = NULL;
James Smartf0d9bcc2010-10-22 11:07:09 -04009349 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
James Smart5ffc2662009-11-18 15:39:44 -05009350 int numBdes, i;
9351 struct ulp_bde64 bde;
James Smartc31098c2011-04-16 11:03:33 -04009352 struct lpfc_nodelist *ndlp;
James Smartff78d8f2011-12-13 13:21:35 -05009353 uint32_t *pcmd;
James Smart1b511972011-12-13 13:23:09 -05009354 uint32_t if_type;
James Smart4f774512009-05-22 14:52:35 -04009355
James Smart45ed1192009-10-02 15:17:02 -04009356 fip = phba->hba_flag & HBA_FIP_SUPPORT;
James Smart4f774512009-05-22 14:52:35 -04009357 /* The fcp commands will set command type */
James Smart0c287582009-06-10 17:22:56 -04009358 if (iocbq->iocb_flag & LPFC_IO_FCP)
James Smart4f774512009-05-22 14:52:35 -04009359 command_type = FCP_COMMAND;
James Smartc8685952009-11-18 15:39:16 -05009360 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
James Smart0c287582009-06-10 17:22:56 -04009361 command_type = ELS_COMMAND_FIP;
9362 else
9363 command_type = ELS_COMMAND_NON_FIP;
9364
James Smartb5c53952016-03-31 14:12:30 -07009365 if (phba->fcp_embed_io)
9366 memset(wqe, 0, sizeof(union lpfc_wqe128));
James Smart4f774512009-05-22 14:52:35 -04009367 /* Some of the fields are in the right position already */
9368 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
James Smarte62245d2019-08-14 16:57:08 -07009369 /* The ct field has moved so reset */
9370 wqe->generic.wqe_com.word7 = 0;
9371 wqe->generic.wqe_com.word10 = 0;
James Smartb5c53952016-03-31 14:12:30 -07009372
9373 abort_tag = (uint32_t) iocbq->iotag;
9374 xritag = iocbq->sli4_xritag;
James Smart4f774512009-05-22 14:52:35 -04009375 /* words0-2 bpl convert bde */
9376 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
James Smart5ffc2662009-11-18 15:39:44 -05009377 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9378 sizeof(struct ulp_bde64);
James Smart4f774512009-05-22 14:52:35 -04009379 bpl = (struct ulp_bde64 *)
9380 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9381 if (!bpl)
9382 return IOCB_ERROR;
9383
9384 /* Should already be byte swapped. */
9385 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9386 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9387 /* swap the size field back to the cpu so we
9388 * can assign it to the sgl.
9389 */
9390 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
James Smart5ffc2662009-11-18 15:39:44 -05009391 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9392 total_len = 0;
9393 for (i = 0; i < numBdes; i++) {
9394 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9395 total_len += bde.tus.f.bdeSize;
9396 }
James Smart4f774512009-05-22 14:52:35 -04009397 } else
James Smart5ffc2662009-11-18 15:39:44 -05009398 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
James Smart4f774512009-05-22 14:52:35 -04009399
9400 iocbq->iocb.ulpIoTag = iocbq->iotag;
9401 cmnd = iocbq->iocb.ulpCommand;
9402
9403 switch (iocbq->iocb.ulpCommand) {
9404 case CMD_ELS_REQUEST64_CR:
James Smart93d1379e2012-05-09 21:19:34 -04009405 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9406 ndlp = iocbq->context_un.ndlp;
9407 else
9408 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04009409 if (!iocbq->iocb.ulpLe) {
9410 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9411 "2007 Only Limited Edition cmd Format"
9412 " supported 0x%x\n",
9413 iocbq->iocb.ulpCommand);
9414 return IOCB_ERROR;
9415 }
James Smartff78d8f2011-12-13 13:21:35 -05009416
James Smart5ffc2662009-11-18 15:39:44 -05009417 wqe->els_req.payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04009418 /* Els_reguest64 has a TMO */
9419 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9420 iocbq->iocb.ulpTimeout);
9421 /* Need a VF for word 4 set the vf bit*/
9422 bf_set(els_req64_vf, &wqe->els_req, 0);
9423 /* And a VFID for word 12 */
9424 bf_set(els_req64_vfid, &wqe->els_req, 0);
James Smart4f774512009-05-22 14:52:35 -04009425 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
James Smartf0d9bcc2010-10-22 11:07:09 -04009426 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9427 iocbq->iocb.ulpContext);
9428 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9429 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
James Smart4f774512009-05-22 14:52:35 -04009430 /* CCP CCPE PV PRI in word10 were set in the memcpy */
James Smartff78d8f2011-12-13 13:21:35 -05009431 if (command_type == ELS_COMMAND_FIP)
James Smartc8685952009-11-18 15:39:16 -05009432 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9433 >> LPFC_FIP_ELS_ID_SHIFT);
James Smartff78d8f2011-12-13 13:21:35 -05009434 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9435 iocbq->context2)->virt);
James Smart1b511972011-12-13 13:23:09 -05009436 if_type = bf_get(lpfc_sli_intf_if_type,
9437 &phba->sli4_hba.sli_intf);
James Smart27d6ac02018-02-22 08:18:42 -08009438 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
James Smartff78d8f2011-12-13 13:21:35 -05009439 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
James Smartcb69f7d2011-12-13 13:21:57 -05009440 *pcmd == ELS_CMD_SCR ||
James Smartdf3fe762020-02-10 09:31:55 -08009441 *pcmd == ELS_CMD_RDF ||
James Smartf60cb932019-05-14 14:58:05 -07009442 *pcmd == ELS_CMD_RSCN_XMT ||
James Smart6b5151f2012-01-18 16:24:06 -05009443 *pcmd == ELS_CMD_FDISC ||
James Smartbdcd2b92012-03-01 22:33:52 -05009444 *pcmd == ELS_CMD_LOGO ||
James Smartff78d8f2011-12-13 13:21:35 -05009445 *pcmd == ELS_CMD_PLOGI)) {
9446 bf_set(els_req64_sp, &wqe->els_req, 1);
9447 bf_set(els_req64_sid, &wqe->els_req,
9448 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -04009449 if ((*pcmd == ELS_CMD_FLOGI) &&
9450 !(phba->fc_topology ==
9451 LPFC_TOPOLOGY_LOOP))
9452 bf_set(els_req64_sid, &wqe->els_req, 0);
James Smartff78d8f2011-12-13 13:21:35 -05009453 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9454 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
James Smarta7dd9c02012-05-09 21:16:50 -04009455 phba->vpi_ids[iocbq->vport->vpi]);
James Smart3ef6d242012-01-18 16:23:48 -05009456 } else if (pcmd && iocbq->context1) {
James Smartff78d8f2011-12-13 13:21:35 -05009457 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9458 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9459 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9460 }
James Smartc8685952009-11-18 15:39:16 -05009461 }
James Smart6d368e52011-05-24 11:44:12 -04009462 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9463 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04009464 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9465 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9466 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9467 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9468 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9469 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -04009470 wqe->els_req.max_response_payload_len = total_len - xmit_len;
James Smart7851fe22011-07-22 18:36:52 -04009471 break;
James Smart5ffc2662009-11-18 15:39:44 -05009472 case CMD_XMIT_SEQUENCE64_CX:
James Smartf0d9bcc2010-10-22 11:07:09 -04009473 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9474 iocbq->iocb.un.ulpWord[3]);
9475 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04009476 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart5ffc2662009-11-18 15:39:44 -05009477 /* The entire sequence is transmitted for this IOCB */
9478 xmit_len = total_len;
9479 cmnd = CMD_XMIT_SEQUENCE64_CR;
James Smart1b511972011-12-13 13:23:09 -05009480 if (phba->link_flag & LS_LOOPBACK_MODE)
9481 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
Gustavo A. R. Silva5bd5f662018-11-27 22:32:18 -06009482 /* fall through */
James Smart4f774512009-05-22 14:52:35 -04009483 case CMD_XMIT_SEQUENCE64_CR:
James Smartf0d9bcc2010-10-22 11:07:09 -04009484 /* word3 iocb=io_tag32 wqe=reserved */
9485 wqe->xmit_sequence.rsvd3 = 0;
James Smart4f774512009-05-22 14:52:35 -04009486 /* word4 relative_offset memcpy */
9487 /* word5 r_ctl/df_ctl memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04009488 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9489 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9490 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9491 LPFC_WQE_IOD_WRITE);
9492 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9493 LPFC_WQE_LENLOC_WORD12);
9494 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
James Smart5ffc2662009-11-18 15:39:44 -05009495 wqe->xmit_sequence.xmit_len = xmit_len;
9496 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04009497 break;
James Smart4f774512009-05-22 14:52:35 -04009498 case CMD_XMIT_BCAST64_CN:
James Smartf0d9bcc2010-10-22 11:07:09 -04009499 /* word3 iocb=iotag32 wqe=seq_payload_len */
9500 wqe->xmit_bcast64.seq_payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04009501 /* word4 iocb=rsvd wqe=rsvd */
9502 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9503 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
James Smartf0d9bcc2010-10-22 11:07:09 -04009504 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04009505 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
James Smartf0d9bcc2010-10-22 11:07:09 -04009506 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9507 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9508 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9509 LPFC_WQE_LENLOC_WORD3);
9510 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
James Smart7851fe22011-07-22 18:36:52 -04009511 break;
James Smart4f774512009-05-22 14:52:35 -04009512 case CMD_FCP_IWRITE64_CR:
9513 command_type = FCP_COMMAND_DATA_OUT;
James Smartf0d9bcc2010-10-22 11:07:09 -04009514 /* word3 iocb=iotag wqe=payload_offset_len */
9515 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04009516 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9517 xmit_len + sizeof(struct fcp_rsp));
9518 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9519 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04009520 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9521 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9522 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9523 iocbq->iocb.ulpFCP2Rcvy);
9524 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9525 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04009526 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9527 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9528 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04009529 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05009530 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05009531 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9532 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07009533 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9534 if (iocbq->priority) {
9535 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9536 (iocbq->priority << 1));
9537 } else {
James Smart1ba981f2014-02-20 09:56:45 -05009538 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9539 (phba->cfg_XLanePriority << 1));
9540 }
9541 }
James Smartb5c53952016-03-31 14:12:30 -07009542 /* Note, word 10 is already initialized to 0 */
9543
James Smart414abe02018-06-26 08:24:26 -07009544 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9545 if (phba->cfg_enable_pbde)
James Smart0bc2b7c2018-02-22 08:18:48 -08009546 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9547 else
9548 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9549
James Smartb5c53952016-03-31 14:12:30 -07009550 if (phba->fcp_embed_io) {
James Smartc4908502019-01-28 11:14:28 -08009551 struct lpfc_io_buf *lpfc_cmd;
James Smartb5c53952016-03-31 14:12:30 -07009552 struct sli4_sge *sgl;
James Smartb5c53952016-03-31 14:12:30 -07009553 struct fcp_cmnd *fcp_cmnd;
9554 uint32_t *ptr;
9555
9556 /* 128 byte wqe support here */
James Smartb5c53952016-03-31 14:12:30 -07009557
9558 lpfc_cmd = iocbq->context1;
James Smart0794d602019-01-28 11:14:19 -08009559 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
James Smartb5c53952016-03-31 14:12:30 -07009560 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9561
9562 /* Word 0-2 - FCP_CMND */
James Smart205e8242018-03-05 12:04:03 -08009563 wqe->generic.bde.tus.f.bdeFlags =
James Smartb5c53952016-03-31 14:12:30 -07009564 BUFF_TYPE_BDE_IMMED;
James Smart205e8242018-03-05 12:04:03 -08009565 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9566 wqe->generic.bde.addrHigh = 0;
9567 wqe->generic.bde.addrLow = 88; /* Word 22 */
James Smartb5c53952016-03-31 14:12:30 -07009568
James Smart205e8242018-03-05 12:04:03 -08009569 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9570 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
James Smartb5c53952016-03-31 14:12:30 -07009571
9572 /* Word 22-29 FCP CMND Payload */
James Smart205e8242018-03-05 12:04:03 -08009573 ptr = &wqe->words[22];
James Smartb5c53952016-03-31 14:12:30 -07009574 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9575 }
James Smart7851fe22011-07-22 18:36:52 -04009576 break;
James Smartf0d9bcc2010-10-22 11:07:09 -04009577 case CMD_FCP_IREAD64_CR:
9578 /* word3 iocb=iotag wqe=payload_offset_len */
9579 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04009580 bf_set(payload_offset_len, &wqe->fcp_iread,
9581 xmit_len + sizeof(struct fcp_rsp));
9582 bf_set(cmd_buff_len, &wqe->fcp_iread,
9583 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04009584 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9585 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9586 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9587 iocbq->iocb.ulpFCP2Rcvy);
9588 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
James Smart4f774512009-05-22 14:52:35 -04009589 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04009590 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9591 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9592 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04009593 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05009594 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05009595 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9596 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07009597 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9598 if (iocbq->priority) {
9599 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9600 (iocbq->priority << 1));
9601 } else {
James Smart1ba981f2014-02-20 09:56:45 -05009602 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9603 (phba->cfg_XLanePriority << 1));
9604 }
9605 }
James Smartb5c53952016-03-31 14:12:30 -07009606 /* Note, word 10 is already initialized to 0 */
9607
James Smart414abe02018-06-26 08:24:26 -07009608 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9609 if (phba->cfg_enable_pbde)
James Smart0bc2b7c2018-02-22 08:18:48 -08009610 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9611 else
9612 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9613
James Smartb5c53952016-03-31 14:12:30 -07009614 if (phba->fcp_embed_io) {
James Smartc4908502019-01-28 11:14:28 -08009615 struct lpfc_io_buf *lpfc_cmd;
James Smartb5c53952016-03-31 14:12:30 -07009616 struct sli4_sge *sgl;
James Smartb5c53952016-03-31 14:12:30 -07009617 struct fcp_cmnd *fcp_cmnd;
9618 uint32_t *ptr;
9619
9620 /* 128 byte wqe support here */
James Smartb5c53952016-03-31 14:12:30 -07009621
9622 lpfc_cmd = iocbq->context1;
James Smart0794d602019-01-28 11:14:19 -08009623 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
James Smartb5c53952016-03-31 14:12:30 -07009624 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9625
9626 /* Word 0-2 - FCP_CMND */
James Smart205e8242018-03-05 12:04:03 -08009627 wqe->generic.bde.tus.f.bdeFlags =
James Smartb5c53952016-03-31 14:12:30 -07009628 BUFF_TYPE_BDE_IMMED;
James Smart205e8242018-03-05 12:04:03 -08009629 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9630 wqe->generic.bde.addrHigh = 0;
9631 wqe->generic.bde.addrLow = 88; /* Word 22 */
James Smartb5c53952016-03-31 14:12:30 -07009632
James Smart205e8242018-03-05 12:04:03 -08009633 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9634 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
James Smartb5c53952016-03-31 14:12:30 -07009635
9636 /* Word 22-29 FCP CMND Payload */
James Smart205e8242018-03-05 12:04:03 -08009637 ptr = &wqe->words[22];
James Smartb5c53952016-03-31 14:12:30 -07009638 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9639 }
James Smart7851fe22011-07-22 18:36:52 -04009640 break;
James Smartf1126682009-06-10 17:22:44 -04009641 case CMD_FCP_ICMND64_CR:
James Smart0ba4b212013-10-10 12:22:38 -04009642 /* word3 iocb=iotag wqe=payload_offset_len */
9643 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9644 bf_set(payload_offset_len, &wqe->fcp_icmd,
9645 xmit_len + sizeof(struct fcp_rsp));
9646 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9647 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04009648 /* word3 iocb=IO_TAG wqe=reserved */
James Smartf0d9bcc2010-10-22 11:07:09 -04009649 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
James Smartf1126682009-06-10 17:22:44 -04009650 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04009651 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9652 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9653 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9654 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9655 LPFC_WQE_LENLOC_NONE);
James Smart2a94aea2012-09-29 11:30:31 -04009656 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9657 iocbq->iocb.ulpFCP2Rcvy);
James Smart1ba981f2014-02-20 09:56:45 -05009658 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9659 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07009660 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9661 if (iocbq->priority) {
9662 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9663 (iocbq->priority << 1));
9664 } else {
James Smart1ba981f2014-02-20 09:56:45 -05009665 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9666 (phba->cfg_XLanePriority << 1));
9667 }
9668 }
James Smartb5c53952016-03-31 14:12:30 -07009669 /* Note, word 10 is already initialized to 0 */
9670
9671 if (phba->fcp_embed_io) {
James Smartc4908502019-01-28 11:14:28 -08009672 struct lpfc_io_buf *lpfc_cmd;
James Smartb5c53952016-03-31 14:12:30 -07009673 struct sli4_sge *sgl;
James Smartb5c53952016-03-31 14:12:30 -07009674 struct fcp_cmnd *fcp_cmnd;
9675 uint32_t *ptr;
9676
9677 /* 128 byte wqe support here */
James Smartb5c53952016-03-31 14:12:30 -07009678
9679 lpfc_cmd = iocbq->context1;
James Smart0794d602019-01-28 11:14:19 -08009680 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
James Smartb5c53952016-03-31 14:12:30 -07009681 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9682
9683 /* Word 0-2 - FCP_CMND */
James Smart205e8242018-03-05 12:04:03 -08009684 wqe->generic.bde.tus.f.bdeFlags =
James Smartb5c53952016-03-31 14:12:30 -07009685 BUFF_TYPE_BDE_IMMED;
James Smart205e8242018-03-05 12:04:03 -08009686 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9687 wqe->generic.bde.addrHigh = 0;
9688 wqe->generic.bde.addrLow = 88; /* Word 22 */
James Smartb5c53952016-03-31 14:12:30 -07009689
James Smart205e8242018-03-05 12:04:03 -08009690 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9691 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
James Smartb5c53952016-03-31 14:12:30 -07009692
9693 /* Word 22-29 FCP CMND Payload */
James Smart205e8242018-03-05 12:04:03 -08009694 ptr = &wqe->words[22];
James Smartb5c53952016-03-31 14:12:30 -07009695 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9696 }
James Smart7851fe22011-07-22 18:36:52 -04009697 break;
James Smart4f774512009-05-22 14:52:35 -04009698 case CMD_GEN_REQUEST64_CR:
James Smart63e801c2010-11-20 23:14:19 -05009699 /* For this command calculate the xmit length of the
9700 * request bde.
9701 */
9702 xmit_len = 0;
9703 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9704 sizeof(struct ulp_bde64);
9705 for (i = 0; i < numBdes; i++) {
James Smart63e801c2010-11-20 23:14:19 -05009706 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
James Smart546fc852011-03-11 16:06:29 -05009707 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9708 break;
James Smart63e801c2010-11-20 23:14:19 -05009709 xmit_len += bde.tus.f.bdeSize;
9710 }
James Smartf0d9bcc2010-10-22 11:07:09 -04009711 /* word3 iocb=IO_TAG wqe=request_payload_len */
9712 wqe->gen_req.request_payload_len = xmit_len;
9713 /* word4 iocb=parameter wqe=relative_offset memcpy */
9714 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
James Smart4f774512009-05-22 14:52:35 -04009715 /* word6 context tag copied in memcpy */
9716 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9717 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9718 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9719 "2015 Invalid CT %x command 0x%x\n",
9720 ct, iocbq->iocb.ulpCommand);
9721 return IOCB_ERROR;
9722 }
James Smartf0d9bcc2010-10-22 11:07:09 -04009723 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9724 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9725 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9726 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9727 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9728 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9729 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9730 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -04009731 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
James Smart4f774512009-05-22 14:52:35 -04009732 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04009733 break;
James Smart4f774512009-05-22 14:52:35 -04009734 case CMD_XMIT_ELS_RSP64_CX:
James Smartc31098c2011-04-16 11:03:33 -04009735 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04009736 /* words0-2 BDE memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04009737 /* word3 iocb=iotag32 wqe=response_payload_len */
9738 wqe->xmit_els_rsp.response_payload_len = xmit_len;
James Smart939723a2012-05-09 21:19:03 -04009739 /* word4 */
9740 wqe->xmit_els_rsp.word4 = 0;
James Smart4f774512009-05-22 14:52:35 -04009741 /* word5 iocb=rsvd wge=did */
9742 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
James Smart939723a2012-05-09 21:19:03 -04009743 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9744
9745 if_type = bf_get(lpfc_sli_intf_if_type,
9746 &phba->sli4_hba.sli_intf);
James Smart27d6ac02018-02-22 08:18:42 -08009747 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
James Smart939723a2012-05-09 21:19:03 -04009748 if (iocbq->vport->fc_flag & FC_PT2PT) {
9749 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9750 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9751 iocbq->vport->fc_myDID);
9752 if (iocbq->vport->fc_myDID == Fabric_DID) {
9753 bf_set(wqe_els_did,
9754 &wqe->xmit_els_rsp.wqe_dest, 0);
9755 }
9756 }
9757 }
James Smartf0d9bcc2010-10-22 11:07:09 -04009758 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9759 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9760 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9761 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04009762 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart4f774512009-05-22 14:52:35 -04009763 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
James Smartf0d9bcc2010-10-22 11:07:09 -04009764 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smart6d368e52011-05-24 11:44:12 -04009765 phba->vpi_ids[iocbq->vport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04009766 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9767 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9768 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9769 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9770 LPFC_WQE_LENLOC_WORD3);
9771 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
James Smart6d368e52011-05-24 11:44:12 -04009772 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9773 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartff78d8f2011-12-13 13:21:35 -05009774 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9775 iocbq->context2)->virt);
9776 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
James Smart939723a2012-05-09 21:19:03 -04009777 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9778 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
James Smartff78d8f2011-12-13 13:21:35 -05009779 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -04009780 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9781 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smartff78d8f2011-12-13 13:21:35 -05009782 phba->vpi_ids[phba->pport->vpi]);
9783 }
James Smart4f774512009-05-22 14:52:35 -04009784 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04009785 break;
James Smart4f774512009-05-22 14:52:35 -04009786 case CMD_CLOSE_XRI_CN:
9787 case CMD_ABORT_XRI_CN:
9788 case CMD_ABORT_XRI_CX:
9789 /* words 0-2 memcpy should be 0 rserved */
9790 /* port will send abts */
James Smartdcf2a4e2010-09-29 11:18:53 -04009791 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9792 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9793 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9794 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9795 } else
9796 fip = 0;
9797
9798 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
James Smart4f774512009-05-22 14:52:35 -04009799 /*
James Smartdcf2a4e2010-09-29 11:18:53 -04009800 * The link is down, or the command was ELS_FIP
9801 * so the fw does not need to send abts
James Smart4f774512009-05-22 14:52:35 -04009802 * on the wire.
9803 */
9804 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9805 else
9806 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9807 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
James Smartf0d9bcc2010-10-22 11:07:09 -04009808 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9809 wqe->abort_cmd.rsrvd5 = 0;
9810 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04009811 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9812 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
James Smart4f774512009-05-22 14:52:35 -04009813 /*
9814 * The abort handler will send us CMD_ABORT_XRI_CN or
9815 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9816 */
James Smartf0d9bcc2010-10-22 11:07:09 -04009817 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9818 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9819 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9820 LPFC_WQE_LENLOC_NONE);
James Smart4f774512009-05-22 14:52:35 -04009821 cmnd = CMD_ABORT_XRI_CX;
9822 command_type = OTHER_COMMAND;
9823 xritag = 0;
James Smart7851fe22011-07-22 18:36:52 -04009824 break;
James Smart6669f9b2009-10-02 15:16:45 -04009825 case CMD_XMIT_BLS_RSP64_CX:
James Smart6b5151f2012-01-18 16:24:06 -05009826 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart546fc852011-03-11 16:06:29 -05009827 /* As BLS ABTS RSP WQE is very different from other WQEs,
James Smart6669f9b2009-10-02 15:16:45 -04009828 * we re-construct this WQE here based on information in
9829 * iocbq from scratch.
9830 */
James Smartd9f492a2019-08-14 16:57:04 -07009831 memset(wqe, 0, sizeof(*wqe));
James Smart5ffc2662009-11-18 15:39:44 -05009832 /* OX_ID is invariable to who sent ABTS to CT exchange */
James Smart6669f9b2009-10-02 15:16:45 -04009833 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05009834 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9835 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
James Smart5ffc2662009-11-18 15:39:44 -05009836 LPFC_ABTS_UNSOL_INT) {
9837 /* ABTS sent by initiator to CT exchange, the
9838 * RX_ID field will be filled with the newly
9839 * allocated responder XRI.
9840 */
9841 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9842 iocbq->sli4_xritag);
9843 } else {
9844 /* ABTS sent by responder to CT exchange, the
9845 * RX_ID field will be filled with the responder
9846 * RX_ID from ABTS.
9847 */
9848 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05009849 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
James Smart5ffc2662009-11-18 15:39:44 -05009850 }
James Smart6669f9b2009-10-02 15:16:45 -04009851 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9852 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
James Smart6b5151f2012-01-18 16:24:06 -05009853
9854 /* Use CT=VPI */
9855 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9856 ndlp->nlp_DID);
9857 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9858 iocbq->iocb.ulpContext);
9859 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
James Smart6669f9b2009-10-02 15:16:45 -04009860 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
James Smart6b5151f2012-01-18 16:24:06 -05009861 phba->vpi_ids[phba->pport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04009862 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9863 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9864 LPFC_WQE_LENLOC_NONE);
James Smart6669f9b2009-10-02 15:16:45 -04009865 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
9866 command_type = OTHER_COMMAND;
James Smart546fc852011-03-11 16:06:29 -05009867 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9868 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9869 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9870 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9871 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9872 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9873 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9874 }
9875
James Smart7851fe22011-07-22 18:36:52 -04009876 break;
James Smartae9e28f2017-05-15 15:20:51 -07009877 case CMD_SEND_FRAME:
James Smarte62245d2019-08-14 16:57:08 -07009878 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9879 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
9880 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
9881 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9882 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9883 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9884 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
9885 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
9886 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
James Smartae9e28f2017-05-15 15:20:51 -07009887 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9888 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9889 return 0;
James Smart4f774512009-05-22 14:52:35 -04009890 case CMD_XRI_ABORTED_CX:
9891 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
James Smart4f774512009-05-22 14:52:35 -04009892 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9893 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9894 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9895 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9896 default:
9897 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9898 "2014 Invalid command 0x%x\n",
9899 iocbq->iocb.ulpCommand);
9900 return IOCB_ERROR;
James Smart7851fe22011-07-22 18:36:52 -04009901 break;
James Smart4f774512009-05-22 14:52:35 -04009902 }
James Smart6d368e52011-05-24 11:44:12 -04009903
James Smart8012cc32012-10-31 14:44:49 -04009904 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9905 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9906 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9907 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9908 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9909 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9910 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9911 LPFC_IO_DIF_INSERT);
James Smartf0d9bcc2010-10-22 11:07:09 -04009912 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9913 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9914 wqe->generic.wqe_com.abort_tag = abort_tag;
9915 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9916 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9917 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9918 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
James Smart4f774512009-05-22 14:52:35 -04009919 return 0;
9920}
9921
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with ringlock held. The function will return
 * success after it successfully submits the iocb to firmware or after adding
 * it to the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ: FCP traffic is spread across the hardware queues by
	 * hba_wqidx; everything else (ELS/CT) goes to the single ELS WQ.
	 */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes,
	 */

	/* Caller must hold this WQ's ring_lock (see lpfc_sli_issue_iocb). */
	lockdep_assert_held(&pring->ring_lock);

	if (piocb->sli4_xritag == NO_XRI) {
		/* No XRI assigned yet. Aborts/closes never need an sglq. */
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			/* Preserve submission order: if the txq already has
			 * queued iocbs, this one must queue behind them
			 * rather than acquire an sglq and jump ahead.
			 */
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					/* sglq pool exhausted: queue to txq
					 * or bounce back per caller's flag.
					 */
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	else {
		/*
		 * This is a continuation of a command (CX) so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		/* Bind the sglq's XRI to this iocb and convert its BPL
		 * (buffer pointer list) into the WQE's SGL format.
		 */
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	/* Translate the SLI-3 style iocb into an SLI-4 WQE and post it. */
	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	/* Track the outstanding command on the ring's txcmplq until the
	 * completion comes back from the port.
	 */
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
10017
/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * This routine wraps the actual lockless version for issuing IOCB function
 * pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
James Smart2a9bf3d2010-06-07 15:24:45 -040010029int
James Smart3772a992009-05-22 14:50:54 -040010030__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10031 struct lpfc_iocbq *piocb, uint32_t flag)
10032{
10033 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10034}
10035
10036/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010037 * lpfc_sli_api_table_setup - Set up sli api function jump table
James Smart3772a992009-05-22 14:50:54 -040010038 * @phba: The hba struct for which this call is being executed.
10039 * @dev_grp: The HBA PCI-Device group number.
10040 *
10041 * This routine sets up the SLI interface API function jump table in @phba
10042 * struct.
10043 * Returns: 0 - success, -ENODEV - failure.
10044 **/
10045int
10046lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10047{
10048
10049 switch (dev_grp) {
10050 case LPFC_PCI_DEV_LP:
10051 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10052 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10053 break;
James Smart4f774512009-05-22 14:52:35 -040010054 case LPFC_PCI_DEV_OC:
10055 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10056 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10057 break;
James Smart3772a992009-05-22 14:50:54 -040010058 default:
10059 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10060 "1419 Invalid HBA PCI-device group: 0x%x\n",
10061 dev_grp);
10062 return -ENODEV;
10063 break;
10064 }
10065 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10066 return 0;
10067}
James Smart92d7f7b2007-06-17 19:56:38 -050010068
/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
James Smart895427b2017-02-12 13:52:30 -080010079struct lpfc_sli_ring *
10080lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
James Smart9bd2bff52014-09-03 12:57:30 -040010081{
James Smartc4908502019-01-28 11:14:28 -080010082 struct lpfc_io_buf *lpfc_cmd;
James Smart5e5b5112019-01-28 11:14:22 -080010083
James Smart895427b2017-02-12 13:52:30 -080010084 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
James Smartcdb42be2019-01-28 11:14:21 -080010085 if (unlikely(!phba->sli4_hba.hdwq))
James Smart7370d102019-01-28 11:14:20 -080010086 return NULL;
10087 /*
10088 * for abort iocb hba_wqidx should already
10089 * be setup based on what work queue we used.
10090 */
10091 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
James Smartc4908502019-01-28 11:14:28 -080010092 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
James Smart1fbf9742019-01-28 11:14:26 -080010093 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
James Smart9bd2bff52014-09-03 12:57:30 -040010094 }
James Smartc00f62e2019-08-14 16:57:11 -070010095 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
James Smart895427b2017-02-12 13:52:30 -080010096 } else {
10097 if (unlikely(!phba->sli4_hba.els_wq))
10098 return NULL;
10099 piocb->hba_wqidx = 0;
10100 return phba->sli4_hba.els_wq->pring;
James Smart9bd2bff52014-09-03 12:57:30 -040010101 }
James Smart9bd2bff52014-09-03 12:57:30 -040010102}
10103
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
 * function. This function gets the appropriate lock and calls
 * __lpfc_sli_issue_iocb function and will return the error returned
 * by __lpfc_sli_issue_iocb function. This wrapper is used by
 * functions which do not hold any lock.
 **/
James Smart92d7f7b2007-06-17 19:56:38 -050010117int
James Smart3772a992009-05-22 14:50:54 -040010118lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
James Smart92d7f7b2007-06-17 19:56:38 -050010119 struct lpfc_iocbq *piocb, uint32_t flag)
10120{
James Smart2a76a282012-08-03 12:35:54 -040010121 struct lpfc_sli_ring *pring;
James Smart93a4d6f2019-11-04 16:57:05 -080010122 struct lpfc_queue *eq;
James Smart92d7f7b2007-06-17 19:56:38 -050010123 unsigned long iflags;
James Smart6a828b02019-01-28 11:14:31 -080010124 int rc;
James Smart92d7f7b2007-06-17 19:56:38 -050010125
James Smart7e56aa22012-08-03 12:35:34 -040010126 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart93a4d6f2019-11-04 16:57:05 -080010127 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10128
James Smart895427b2017-02-12 13:52:30 -080010129 pring = lpfc_sli4_calc_ring(phba, piocb);
10130 if (unlikely(pring == NULL))
James Smart9bd2bff52014-09-03 12:57:30 -040010131 return IOCB_ERROR;
James Smartba20c852012-08-03 12:36:52 -040010132
James Smart9bd2bff52014-09-03 12:57:30 -040010133 spin_lock_irqsave(&pring->ring_lock, iflags);
10134 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10135 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart93a4d6f2019-11-04 16:57:05 -080010136
10137 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
James Smart7e56aa22012-08-03 12:35:34 -040010138 } else {
10139 /* For now, SLI2/3 will still use hbalock */
10140 spin_lock_irqsave(&phba->hbalock, iflags);
10141 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10142 spin_unlock_irqrestore(&phba->hbalock, iflags);
10143 }
James Smart92d7f7b2007-06-17 19:56:38 -050010144 return rc;
10145}
10146
James Smarte59058c2008-08-24 21:49:00 -040010147/**
James Smart3621a712009-04-06 18:47:14 -040010148 * lpfc_extra_ring_setup - Extra ring setup function
James Smarte59058c2008-08-24 21:49:00 -040010149 * @phba: Pointer to HBA context object.
10150 *
10151 * This function is called while driver attaches with the
10152 * HBA to setup the extra ring. The extra ring is used
10153 * only when driver needs to support target mode functionality
10154 * or IP over FC functionalities.
10155 *
James Smart895427b2017-02-12 13:52:30 -080010156 * This function is called with no lock held. SLI3 only.
James Smarte59058c2008-08-24 21:49:00 -040010157 **/
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010158static int
10159lpfc_extra_ring_setup( struct lpfc_hba *phba)
10160{
10161 struct lpfc_sli *psli;
10162 struct lpfc_sli_ring *pring;
10163
10164 psli = &phba->sli;
10165
10166 /* Adjust cmd/rsp ring iocb entries more evenly */
James Smarta4bc3372006-12-02 13:34:16 -050010167
10168 /* Take some away from the FCP ring */
James Smart895427b2017-02-12 13:52:30 -080010169 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smart7e56aa22012-08-03 12:35:34 -040010170 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10171 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10172 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10173 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010174
James Smarta4bc3372006-12-02 13:34:16 -050010175 /* and give them to the extra ring */
James Smart895427b2017-02-12 13:52:30 -080010176 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
James Smarta4bc3372006-12-02 13:34:16 -050010177
James Smart7e56aa22012-08-03 12:35:34 -040010178 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10179 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10180 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10181 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010182
10183 /* Setup default profile for this ring */
10184 pring->iotag_max = 4096;
10185 pring->num_mask = 1;
10186 pring->prt[0].profile = 0; /* Mask 0 */
James Smarta4bc3372006-12-02 13:34:16 -050010187 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10188 pring->prt[0].type = phba->cfg_multi_ring_type;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010189 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10190 return 0;
10191}
10192
James Smartcb69f7d2011-12-13 13:21:57 -050010193/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10194 * @phba: Pointer to HBA context object.
10195 * @iocbq: Pointer to iocb object.
10196 *
10197 * The async_event handler calls this routine when it receives
10198 * an ASYNC_STATUS_CN event from the port. The port generates
10199 * this event when an Abort Sequence request to an rport fails
10200 * twice in succession. The abort could be originated by the
10201 * driver or by the port. The ABTS could have been for an ELS
10202 * or FCP IO. The port only generates this event when an ABTS
10203 * fails to complete after one retry.
10204 */
10205static void
10206lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10207 struct lpfc_iocbq *iocbq)
10208{
10209 struct lpfc_nodelist *ndlp = NULL;
10210 uint16_t rpi = 0, vpi = 0;
10211 struct lpfc_vport *vport = NULL;
10212
10213 /* The rpi in the ulpContext is vport-sensitive. */
10214 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10215 rpi = iocbq->iocb.ulpContext;
10216
10217 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10218 "3092 Port generated ABTS async event "
10219 "on vpi %d rpi %d status 0x%x\n",
10220 vpi, rpi, iocbq->iocb.ulpStatus);
10221
10222 vport = lpfc_find_vport_by_vpid(phba, vpi);
10223 if (!vport)
10224 goto err_exit;
10225 ndlp = lpfc_findnode_rpi(vport, rpi);
10226 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10227 goto err_exit;
10228
10229 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10230 lpfc_sli_abts_recover_port(vport, ndlp);
10231 return;
10232
10233 err_exit:
10234 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10235 "3095 Event Context not found, no "
10236 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10237 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10238 vpi, rpi);
10239}
10240
10241/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10242 * @phba: pointer to HBA context object.
10243 * @ndlp: nodelist pointer for the impacted rport.
10244 * @axri: pointer to the wcqe containing the failed exchange.
10245 *
10246 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10247 * port. The port generates this event when an abort exchange request to an
10248 * rport fails twice in succession with no reply. The abort could be originated
10249 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10250 */
10251void
10252lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10253 struct lpfc_nodelist *ndlp,
10254 struct sli4_wcqe_xri_aborted *axri)
10255{
10256 struct lpfc_vport *vport;
James Smart5c1db2a2012-03-01 22:34:36 -050010257 uint32_t ext_status = 0;
James Smartcb69f7d2011-12-13 13:21:57 -050010258
James Smart6b5151f2012-01-18 16:24:06 -050010259 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
James Smartcb69f7d2011-12-13 13:21:57 -050010260 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10261 "3115 Node Context not found, driver "
10262 "ignoring abts err event\n");
James Smart6b5151f2012-01-18 16:24:06 -050010263 return;
10264 }
10265
James Smartcb69f7d2011-12-13 13:21:57 -050010266 vport = ndlp->vport;
10267 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10268 "3116 Port generated FCP XRI ABORT event on "
James Smart5c1db2a2012-03-01 22:34:36 -050010269 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
James Smart8e668af2013-05-31 17:04:28 -040010270 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
James Smartcb69f7d2011-12-13 13:21:57 -050010271 bf_get(lpfc_wcqe_xa_xri, axri),
James Smart5c1db2a2012-03-01 22:34:36 -050010272 bf_get(lpfc_wcqe_xa_status, axri),
10273 axri->parameter);
James Smartcb69f7d2011-12-13 13:21:57 -050010274
James Smart5c1db2a2012-03-01 22:34:36 -050010275 /*
10276 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10277 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10278 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10279 */
James Smarte3d2b802012-08-14 14:25:43 -040010280 ext_status = axri->parameter & IOERR_PARAM_MASK;
James Smart5c1db2a2012-03-01 22:34:36 -050010281 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10282 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
James Smartcb69f7d2011-12-13 13:21:57 -050010283 lpfc_sli_abts_recover_port(vport, ndlp);
10284}
10285
James Smarte59058c2008-08-24 21:49:00 -040010286/**
James Smart3621a712009-04-06 18:47:14 -040010287 * lpfc_sli_async_event_handler - ASYNC iocb handler function
James Smarte59058c2008-08-24 21:49:00 -040010288 * @phba: Pointer to HBA context object.
10289 * @pring: Pointer to driver SLI ring object.
10290 * @iocbq: Pointer to iocb object.
10291 *
10292 * This function is called by the slow ring event handler
10293 * function when there is an ASYNC event iocb in the ring.
10294 * This function is called with no lock held.
10295 * Currently this function handles only temperature related
10296 * ASYNC events. The function decodes the temperature sensor
10297 * event message and posts events for the management applications.
10298 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		/* Temperature sensor event: ulpContext carries the reading
		 * in degrees Celsius.
		 */
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		/* Port reported a twice-failed ABTS; try to recover the
		 * affected rport.
		 */
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		/* Unknown event: dump all 16 raw iocb words for debugging. */
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}
10358
10359
/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up the unsolicited-frame masks of the SLI-4
 * ELS ring. This function is called while the driver attaches to the
 * HBA and before the interrupts are enabled. So there is no need for
 * locking.
 *
 * This function always returns 0.
 **/
dea31012005-04-17 16:05:31 -050010371int
James Smart895427b2017-02-12 13:52:30 -080010372lpfc_sli4_setup(struct lpfc_hba *phba)
10373{
10374 struct lpfc_sli_ring *pring;
10375
10376 pring = phba->sli4_hba.els_wq->pring;
10377 pring->num_mask = LPFC_MAX_RING_MASK;
10378 pring->prt[0].profile = 0; /* Mask 0 */
10379 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10380 pring->prt[0].type = FC_TYPE_ELS;
10381 pring->prt[0].lpfc_sli_rcv_unsol_event =
10382 lpfc_els_unsol_event;
10383 pring->prt[1].profile = 0; /* Mask 1 */
10384 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10385 pring->prt[1].type = FC_TYPE_ELS;
10386 pring->prt[1].lpfc_sli_rcv_unsol_event =
10387 lpfc_els_unsol_event;
10388 pring->prt[2].profile = 0; /* Mask 2 */
10389 /* NameServer Inquiry */
10390 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10391 /* NameServer */
10392 pring->prt[2].type = FC_TYPE_CT;
10393 pring->prt[2].lpfc_sli_rcv_unsol_event =
10394 lpfc_ct_unsol_event;
10395 pring->prt[3].profile = 0; /* Mask 3 */
10396 /* NameServer response */
10397 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10398 /* NameServer */
10399 pring->prt[3].type = FC_TYPE_CT;
10400 pring->prt[3].lpfc_sli_rcv_unsol_event =
10401 lpfc_ct_unsol_event;
10402 return 0;
10403}
10404
10405/**
10406 * lpfc_sli_setup - SLI ring setup function
10407 * @phba: Pointer to HBA context object.
10408 *
10409 * lpfc_sli_setup sets up rings of the SLI interface with
10410 * number of iocbs per ring and iotags. This function is
10411 * called while driver attach to the HBA and before the
10412 * interrupts are enabled. So there is no need for locking.
10413 *
10414 * This function always returns 0. SLI3 only.
10415 **/
10416int
dea31012005-04-17 16:05:31 -050010417lpfc_sli_setup(struct lpfc_hba *phba)
10418{
James Smarted957682007-06-17 19:56:37 -050010419 int i, totiocbsize = 0;
dea31012005-04-17 16:05:31 -050010420 struct lpfc_sli *psli = &phba->sli;
10421 struct lpfc_sli_ring *pring;
10422
James Smart2a76a282012-08-03 12:35:54 -040010423 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
dea31012005-04-17 16:05:31 -050010424 psli->sli_flag = 0;
dea31012005-04-17 16:05:31 -050010425
James Bottomley604a3e32005-10-29 10:28:33 -050010426 psli->iocbq_lookup = NULL;
10427 psli->iocbq_lookup_len = 0;
10428 psli->last_iotag = 0;
10429
dea31012005-04-17 16:05:31 -050010430 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -080010431 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -050010432 switch (i) {
10433 case LPFC_FCP_RING: /* ring 0 - FCP */
10434 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -040010435 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10436 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10437 pring->sli.sli3.numCiocb +=
10438 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10439 pring->sli.sli3.numRiocb +=
10440 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10441 pring->sli.sli3.numCiocb +=
10442 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10443 pring->sli.sli3.numRiocb +=
10444 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10445 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -050010446 SLI3_IOCB_CMD_SIZE :
10447 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -040010448 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -050010449 SLI3_IOCB_RSP_SIZE :
10450 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -050010451 pring->iotag_ctr = 0;
10452 pring->iotag_max =
James Smart92d7f7b2007-06-17 19:56:38 -050010453 (phba->cfg_hba_queue_depth * 2);
dea31012005-04-17 16:05:31 -050010454 pring->fast_iotag = pring->iotag_max;
10455 pring->num_mask = 0;
10456 break;
James Smarta4bc3372006-12-02 13:34:16 -050010457 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea31012005-04-17 16:05:31 -050010458 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -040010459 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10460 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10461 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -050010462 SLI3_IOCB_CMD_SIZE :
10463 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -040010464 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -050010465 SLI3_IOCB_RSP_SIZE :
10466 SLI2_IOCB_RSP_SIZE;
James Smart2e0fef82007-06-17 19:56:36 -050010467 pring->iotag_max = phba->cfg_hba_queue_depth;
dea31012005-04-17 16:05:31 -050010468 pring->num_mask = 0;
10469 break;
10470 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10471 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -040010472 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10473 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10474 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -050010475 SLI3_IOCB_CMD_SIZE :
10476 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -040010477 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -050010478 SLI3_IOCB_RSP_SIZE :
10479 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -050010480 pring->fast_iotag = 0;
10481 pring->iotag_ctr = 0;
10482 pring->iotag_max = 4096;
James Smart57127f12007-10-27 13:37:05 -040010483 pring->lpfc_sli_rcv_async_status =
10484 lpfc_sli_async_event_handler;
James Smart6669f9b2009-10-02 15:16:45 -040010485 pring->num_mask = LPFC_MAX_RING_MASK;
dea31012005-04-17 16:05:31 -050010486 pring->prt[0].profile = 0; /* Mask 0 */
James Smart6a9c52c2009-10-02 15:16:51 -040010487 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10488 pring->prt[0].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -050010489 pring->prt[0].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -050010490 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -050010491 pring->prt[1].profile = 0; /* Mask 1 */
James Smart6a9c52c2009-10-02 15:16:51 -040010492 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10493 pring->prt[1].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -050010494 pring->prt[1].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -050010495 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -050010496 pring->prt[2].profile = 0; /* Mask 2 */
10497 /* NameServer Inquiry */
James Smart6a9c52c2009-10-02 15:16:51 -040010498 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea31012005-04-17 16:05:31 -050010499 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -040010500 pring->prt[2].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -050010501 pring->prt[2].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -050010502 lpfc_ct_unsol_event;
dea31012005-04-17 16:05:31 -050010503 pring->prt[3].profile = 0; /* Mask 3 */
10504 /* NameServer response */
James Smart6a9c52c2009-10-02 15:16:51 -040010505 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea31012005-04-17 16:05:31 -050010506 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -040010507 pring->prt[3].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -050010508 pring->prt[3].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -050010509 lpfc_ct_unsol_event;
dea31012005-04-17 16:05:31 -050010510 break;
10511 }
James Smart7e56aa22012-08-03 12:35:34 -040010512 totiocbsize += (pring->sli.sli3.numCiocb *
10513 pring->sli.sli3.sizeCiocb) +
10514 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea31012005-04-17 16:05:31 -050010515 }
James Smarted957682007-06-17 19:56:37 -050010516 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea31012005-04-17 16:05:31 -050010517 /* Too many cmd / rsp ring entries in SLI2 SLIM */
James Smarte8b62012007-08-02 11:10:09 -040010518 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10519 "SLI2 SLIM Data: x%x x%lx\n",
10520 phba->brd_no, totiocbsize,
10521 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea31012005-04-17 16:05:31 -050010522 }
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010523 if (phba->cfg_multi_ring_support == 2)
10524 lpfc_extra_ring_setup(phba);
dea31012005-04-17 16:05:31 -050010525
10526 return 0;
10527}
10528
James Smarte59058c2008-08-24 21:49:00 -040010529/**
James Smart895427b2017-02-12 13:52:30 -080010530 * lpfc_sli4_queue_init - Queue initialization function
James Smarte59058c2008-08-24 21:49:00 -040010531 * @phba: Pointer to HBA context object.
10532 *
James Smart895427b2017-02-12 13:52:30 -080010533 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
James Smarte59058c2008-08-24 21:49:00 -040010534 * ring. This function also initializes ring indices of each ring.
10535 * This function is called during the initialization of the SLI
10536 * interface of an HBA.
 * This function is called with no lock held.
10539 **/
James Smart895427b2017-02-12 13:52:30 -080010540void
10541lpfc_sli4_queue_init(struct lpfc_hba *phba)
10542{
10543 struct lpfc_sli *psli;
10544 struct lpfc_sli_ring *pring;
10545 int i;
10546
10547 psli = &phba->sli;
10548 spin_lock_irq(&phba->hbalock);
10549 INIT_LIST_HEAD(&psli->mboxq);
10550 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10551 /* Initialize list headers for txq and txcmplq as double linked lists */
James Smartcdb42be2019-01-28 11:14:21 -080010552 for (i = 0; i < phba->cfg_hdw_queue; i++) {
James Smartc00f62e2019-08-14 16:57:11 -070010553 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
James Smart895427b2017-02-12 13:52:30 -080010554 pring->flag = 0;
10555 pring->ringno = LPFC_FCP_RING;
James Smartc4908502019-01-28 11:14:28 -080010556 pring->txcmplq_cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080010557 INIT_LIST_HEAD(&pring->txq);
10558 INIT_LIST_HEAD(&pring->txcmplq);
10559 INIT_LIST_HEAD(&pring->iocb_continueq);
10560 spin_lock_init(&pring->ring_lock);
10561 }
10562 pring = phba->sli4_hba.els_wq->pring;
10563 pring->flag = 0;
10564 pring->ringno = LPFC_ELS_RING;
James Smartc4908502019-01-28 11:14:28 -080010565 pring->txcmplq_cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080010566 INIT_LIST_HEAD(&pring->txq);
10567 INIT_LIST_HEAD(&pring->txcmplq);
10568 INIT_LIST_HEAD(&pring->iocb_continueq);
10569 spin_lock_init(&pring->ring_lock);
10570
James Smartcdb42be2019-01-28 11:14:21 -080010571 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
James Smart895427b2017-02-12 13:52:30 -080010572 pring = phba->sli4_hba.nvmels_wq->pring;
10573 pring->flag = 0;
10574 pring->ringno = LPFC_ELS_RING;
James Smartc4908502019-01-28 11:14:28 -080010575 pring->txcmplq_cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080010576 INIT_LIST_HEAD(&pring->txq);
10577 INIT_LIST_HEAD(&pring->txcmplq);
10578 INIT_LIST_HEAD(&pring->iocb_continueq);
10579 spin_lock_init(&pring->ring_lock);
10580 }
10581
10582 spin_unlock_irq(&phba->hbalock);
10583}
10584
10585/**
10586 * lpfc_sli_queue_init - Queue initialization function
10587 * @phba: Pointer to HBA context object.
10588 *
10589 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10590 * ring. This function also initializes ring indices of each ring.
10591 * This function is called during the initialization of the SLI
10592 * interface of an HBA.
 * This function is called with no lock held.
10595 **/
10596void
10597lpfc_sli_queue_init(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -050010598{
10599 struct lpfc_sli *psli;
10600 struct lpfc_sli_ring *pring;
James Bottomley604a3e32005-10-29 10:28:33 -050010601 int i;
dea31012005-04-17 16:05:31 -050010602
10603 psli = &phba->sli;
James Smart2e0fef82007-06-17 19:56:36 -050010604 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010605 INIT_LIST_HEAD(&psli->mboxq);
James Smart92d7f7b2007-06-17 19:56:38 -050010606 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea31012005-04-17 16:05:31 -050010607 /* Initialize list headers for txq and txcmplq as double linked lists */
10608 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -080010609 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -050010610 pring->ringno = i;
James Smart7e56aa22012-08-03 12:35:34 -040010611 pring->sli.sli3.next_cmdidx = 0;
10612 pring->sli.sli3.local_getidx = 0;
10613 pring->sli.sli3.cmdidx = 0;
dea31012005-04-17 16:05:31 -050010614 INIT_LIST_HEAD(&pring->iocb_continueq);
James Smart9c2face2008-01-11 01:53:18 -050010615 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea31012005-04-17 16:05:31 -050010616 INIT_LIST_HEAD(&pring->postbufq);
James Smart895427b2017-02-12 13:52:30 -080010617 pring->flag = 0;
10618 INIT_LIST_HEAD(&pring->txq);
10619 INIT_LIST_HEAD(&pring->txcmplq);
James Smart7e56aa22012-08-03 12:35:34 -040010620 spin_lock_init(&pring->ring_lock);
dea31012005-04-17 16:05:31 -050010621 }
James Smart2e0fef82007-06-17 19:56:36 -050010622 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010623}
10624
James Smarte59058c2008-08-24 21:49:00 -040010625/**
James Smart04c68492009-05-22 14:52:52 -040010626 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10627 * @phba: Pointer to HBA context object.
10628 *
10629 * This routine flushes the mailbox command subsystem. It will unconditionally
10630 * flush all the mailbox commands in the three possible stages in the mailbox
10631 * command sub-system: pending mailbox command queue; the outstanding mailbox
10632 * command; and completed mailbox command queue. It is caller's responsibility
10633 * to make sure that the driver is in the proper state to flush the mailbox
10634 * command sub-system. Namely, the posting of mailbox commands into the
10635 * pending mailbox command queue from the various clients must be stopped;
10636 * either the HBA is in a state that it will never works on the outstanding
10637 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10638 * mailbox command has been completed.
10639 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);	/* collects mboxes from all three stages */
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);

	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		/* Clear ACTIVE so the mbox sub-system no longer thinks a
		 * command is in flight.
		 */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status.
	 * Completion handlers are invoked only after the lock has been
	 * dropped; a NULL mbox_cmpl means no caller is waiting on this mbox.
	 */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
10677
10678/**
James Smart3621a712009-04-06 18:47:14 -040010679 * lpfc_sli_host_down - Vport cleanup function
James Smarte59058c2008-08-24 21:49:00 -040010680 * @vport: Pointer to virtual port object.
10681 *
10682 * lpfc_sli_host_down is called to clean up the resources
10683 * associated with a vport before destroying virtual
10684 * port data structures.
10685 * This function does following operations:
10686 * - Free discovery resources associated with this virtual
10687 * port.
10688 * - Free iocbs associated with this virtual port in
10689 * the txq.
10690 * - Send abort for all iocb commands associated with this
10691 * vport in txcmplq.
10692 *
10693 * This function is called with no lock held and always returns 1.
10694 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);	/* txq iocbs of this vport, to be failed */
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	/* Tear down discovery state (nodes, timers) for this vport first */
	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 * Also issue ABTS for everything on the txcmplq
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		/* SLI3: walk the fixed ring array */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			prev_pring_flag = pring->flag;
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			/* Only this vport's iocbs are pulled; other vports
			 * sharing the ring are left untouched.
			 */
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			/* txcmplq entries are already with the FW: abort them
			 * instead of completing them locally.
			 */
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	} else {
		/* SLI4: walk every work queue's ring */
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			prev_pring_flag = pring->flag;
			/* ring_lock (nested inside hbalock) guards the txq
			 * here; the txcmplq scan below runs without it —
			 * NOTE(review): presumably covered by hbalock alone,
			 * confirm against other txcmplq users.
			 */
			spin_lock(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			spin_unlock(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list; handlers run with
	 * no locks held.
	 */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
10776
James Smarte59058c2008-08-24 21:49:00 -040010777/**
James Smart3621a712009-04-06 18:47:14 -040010778 * lpfc_sli_hba_down - Resource cleanup function for the HBA
James Smarte59058c2008-08-24 21:49:00 -040010779 * @phba: Pointer to HBA context object.
10780 *
10781 * This function cleans up all iocb, buffers, mailbox commands
10782 * while shutting down the HBA. This function is called with no
10783 * lock held and always returns 1.
10784 * This function does the following to cleanup driver resources:
10785 * - Free discovery resources for each virtual port
10786 * - Cleanup any pending fabric iocbs
10787 * - Iterate through the iocb txq and free each entry
10788 * in the list.
10789 * - Free up any buffer posted to the HBA
10790 * - Free mailbox commands in the mailbox queue.
10791 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);	/* reused: first txq iocbs, then ELS buffers */
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	/* Abort any fabric iocbs queued at the driver level */
	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		/* SLI3: fixed ring array */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		/* SLI4: every work queue's ring; txq needs the nested
		 * ring_lock.
		 */
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			spin_lock(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* completions is empty again; now drain the posted ELS buffer list */
	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Free the DMA buffers outside the lock */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	/* Clear the pending mailbox-timeout worker event */
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
James Smart92d7f7b2007-06-17 19:56:38 -050010876
James Smartda0436e2009-05-22 14:51:39 -040010877/**
James Smart3621a712009-04-06 18:47:14 -040010878 * lpfc_sli_pcimem_bcopy - SLI memory copy function
James Smarte59058c2008-08-24 21:49:00 -040010879 * @srcp: Source memory pointer.
10880 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (transferred one 32-bit word at a time).
10882 *
10883 * This function is used for copying data between driver memory
10884 * and the SLI memory. This function also changes the endianness
10885 * of each word if native endianness is different from SLI
10886 * endianness. This function can be called with or without
10887 * lock.
10888 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *from = srcp;
	uint32_t *to = destp;
	int off;

	/* Move one 32-bit word per pass, converting little endian words
	 * from SLI memory into host byte order.
	 */
	for (off = 0; off < (int)cnt; off += sizeof(uint32_t))
		*to++ = le32_to_cpu(*from++);
}
10905
James Smarte59058c2008-08-24 21:49:00 -040010906
10907/**
James Smarta0c87cb2009-07-19 10:01:10 -040010908 * lpfc_sli_bemem_bcopy - SLI memory copy function
10909 * @srcp: Source memory pointer.
10910 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (transferred one 32-bit word at a time).
10912 *
10913 * This function is used for copying data between a data structure
10914 * with big endian representation to local endianness.
10915 * This function can be called with or without lock.
10916 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *from = srcp;
	uint32_t *to = destp;
	int off;

	/* Move one 32-bit word per pass, converting big endian words
	 * into host byte order.
	 */
	for (off = 0; off < (int)cnt; off += sizeof(uint32_t))
		*to++ = be32_to_cpu(*from++);
}
10933
10934/**
James Smart3621a712009-04-06 18:47:14 -040010935 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
James Smarte59058c2008-08-24 21:49:00 -040010936 * @phba: Pointer to HBA context object.
10937 * @pring: Pointer to driver SLI ring object.
10938 * @mp: Pointer to driver buffer object.
10939 *
10940 * This function is called with no lock held.
10941 * It always return zero after adding the buffer to the postbufq
10942 * buffer list.
10943 **/
dea31012005-04-17 16:05:31 -050010944int
James Smart2e0fef82007-06-17 19:56:36 -050010945lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10946 struct lpfc_dmabuf *mp)
dea31012005-04-17 16:05:31 -050010947{
10948 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10949 later */
James Smart2e0fef82007-06-17 19:56:36 -050010950 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010951 list_add_tail(&mp->list, &pring->postbufq);
dea31012005-04-17 16:05:31 -050010952 pring->postbufq_cnt++;
James Smart2e0fef82007-06-17 19:56:36 -050010953 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010954 return 0;
10955}
10956
James Smarte59058c2008-08-24 21:49:00 -040010957/**
James Smart3621a712009-04-06 18:47:14 -040010958 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
James Smarte59058c2008-08-24 21:49:00 -040010959 * @phba: Pointer to HBA context object.
10960 *
10961 * When HBQ is enabled, buffers are searched based on tags. This function
10962 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10963 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10964 * does not conflict with tags of buffer posted for unsolicited events.
10965 * The function returns the allocated tag. The function is called with
10966 * no locks held.
10967 **/
James Smart76bb24e2007-10-27 13:38:00 -040010968uint32_t
10969lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10970{
10971 spin_lock_irq(&phba->hbalock);
10972 phba->buffer_tag_count++;
10973 /*
10974 * Always set the QUE_BUFTAG_BIT to distiguish between
10975 * a tag assigned by HBQ.
10976 */
10977 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10978 spin_unlock_irq(&phba->hbalock);
10979 return phba->buffer_tag_count;
10980}
10981
James Smarte59058c2008-08-24 21:49:00 -040010982/**
James Smart3621a712009-04-06 18:47:14 -040010983 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
James Smarte59058c2008-08-24 21:49:00 -040010984 * @phba: Pointer to HBA context object.
10985 * @pring: Pointer to driver SLI ring object.
10986 * @tag: Buffer tag.
10987 *
10988 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10989 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10990 * iocb is posted to the response ring with the tag of the buffer.
10991 * This function searches the pring->postbufq list using the tag
10992 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10993 * iocb. If the buffer is found then lpfc_dmabuf object of the
10994 * buffer is returned to the caller else NULL is returned.
10995 * This function is called with no lock held.
10996 **/
James Smart76bb24e2007-10-27 13:38:00 -040010997struct lpfc_dmabuf *
10998lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10999 uint32_t tag)
11000{
11001 struct lpfc_dmabuf *mp, *next_mp;
11002 struct list_head *slp = &pring->postbufq;
11003
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011004 /* Search postbufq, from the beginning, looking for a match on tag */
James Smart76bb24e2007-10-27 13:38:00 -040011005 spin_lock_irq(&phba->hbalock);
11006 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11007 if (mp->buffer_tag == tag) {
11008 list_del_init(&mp->list);
11009 pring->postbufq_cnt--;
11010 spin_unlock_irq(&phba->hbalock);
11011 return mp;
11012 }
11013 }
11014
11015 spin_unlock_irq(&phba->hbalock);
11016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartd7c255b2008-08-24 21:50:00 -040011017 "0402 Cannot find virtual addr for buffer tag on "
James Smart32350662019-08-14 16:57:06 -070011018 "ring %d Data x%lx x%px x%px x%x\n",
James Smart76bb24e2007-10-27 13:38:00 -040011019 pring->ringno, (unsigned long) tag,
11020 slp->next, slp->prev, pring->postbufq_cnt);
11021
11022 return NULL;
11023}
dea31012005-04-17 16:05:31 -050011024
James Smarte59058c2008-08-24 21:49:00 -040011025/**
James Smart3621a712009-04-06 18:47:14 -040011026 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
James Smarte59058c2008-08-24 21:49:00 -040011027 * @phba: Pointer to HBA context object.
11028 * @pring: Pointer to driver SLI ring object.
11029 * @phys: DMA address of the buffer.
11030 *
11031 * This function searches the buffer list using the dma_address
11032 * of unsolicited event to find the driver's lpfc_dmabuf object
11033 * corresponding to the dma_address. The function returns the
11034 * lpfc_dmabuf object if a buffer is found else it returns NULL.
11035 * This function is called by the ct and els unsolicited event
11036 * handlers to get the buffer associated with the unsolicited
11037 * event.
11038 *
11039 * This function is called with no lock held.
11040 **/
dea31012005-04-17 16:05:31 -050011041struct lpfc_dmabuf *
11042lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11043 dma_addr_t phys)
11044{
11045 struct lpfc_dmabuf *mp, *next_mp;
11046 struct list_head *slp = &pring->postbufq;
11047
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011048 /* Search postbufq, from the beginning, looking for a match on phys */
James Smart2e0fef82007-06-17 19:56:36 -050011049 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011050 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11051 if (mp->phys == phys) {
11052 list_del_init(&mp->list);
11053 pring->postbufq_cnt--;
James Smart2e0fef82007-06-17 19:56:36 -050011054 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011055 return mp;
11056 }
11057 }
11058
James Smart2e0fef82007-06-17 19:56:36 -050011059 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -040011061 "0410 Cannot find virtual addr for mapped buf on "
James Smart32350662019-08-14 16:57:06 -070011062 "ring %d Data x%llx x%px x%px x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011063 pring->ringno, (unsigned long long)phys,
dea31012005-04-17 16:05:31 -050011064 slp->next, slp->prev, pring->postbufq_cnt);
11065 return NULL;
11066}
11067
James Smarte59058c2008-08-24 21:49:00 -040011068/**
James Smart3621a712009-04-06 18:47:14 -040011069 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
James Smarte59058c2008-08-24 21:49:00 -040011070 * @phba: Pointer to HBA context object.
11071 * @cmdiocb: Pointer to driver command iocb object.
11072 * @rspiocb: Pointer to driver response iocb object.
11073 *
11074 * This function is the completion handler for the abort iocbs for
11075 * ELS commands. This function is called from the ELS ring event
11076 * handler with no lock held. This function frees memory resources
11077 * associated with the abort iocb.
11078 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	/* A zero ulpStatus means the abort completed cleanly: nothing to
	 * log; fall through and release the abort iocb.
	 */
	if (irsp->ulpStatus) {

		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just Log the message.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			/* ABORT_XRI_CX rejected with ABORT_REQUESTED: skip
			 * the lookup/log and just release the abort iocb.
			 */
			if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
			    irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
			    irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
				spin_unlock_irq(&phba->hbalock);
				goto release_iocb;
			}
			/* Range-check the iotag before indexing the lookup
			 * table.
			 */
			if (abort_iotag != 0 &&
				abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb x%px "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
release_iocb:
	/* The abort iocb itself is always released here, success or not */
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
11129
James Smarte59058c2008-08-24 21:49:00 -040011130/**
James Smart3621a712009-04-06 18:47:14 -040011131 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
James Smarte59058c2008-08-24 21:49:00 -040011132 * @phba: Pointer to HBA context object.
11133 * @cmdiocb: Pointer to driver command iocb object.
11134 * @rspiocb: Pointer to driver response iocb object.
11135 *
11136 * The function is called from SLI ring event handler with no
11137 * lock held. This function is the completion handler for ELS commands
11138 * which are aborted. The function frees memory resources used for
11139 * the aborted ELS commands.
11140 **/
James Smart92d7f7b2007-06-17 19:56:38 -050011141static void
11142lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11143 struct lpfc_iocbq *rspiocb)
11144{
11145 IOCB_t *irsp = &rspiocb->iocb;
11146
11147 /* ELS cmd tag <ulpIoTag> completes */
11148 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smartd7c255b2008-08-24 21:50:00 -040011149 "0139 Ignoring ELS cmd tag x%x completion Data: "
James Smart92d7f7b2007-06-17 19:56:38 -050011150 "x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011151 irsp->ulpIoTag, irsp->ulpStatus,
James Smart92d7f7b2007-06-17 19:56:38 -050011152 irsp->un.ulpWord[4], irsp->ulpTimeout);
James Smart858c9f62007-06-17 19:56:39 -050011153 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11154 lpfc_ct_free_iocb(phba, cmdiocb);
11155 else
11156 lpfc_els_free_iocb(phba, cmdiocb);
James Smart92d7f7b2007-06-17 19:56:38 -050011157 return;
11158}
11159
James Smarte59058c2008-08-24 21:49:00 -040011160/**
James Smart5af5eee2010-10-22 11:06:38 -040011161 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
James Smarte59058c2008-08-24 21:49:00 -040011162 * @phba: Pointer to HBA context object.
11163 * @pring: Pointer to driver SLI ring object.
11164 * @cmdiocb: Pointer to driver command iocb object.
11165 *
James Smart5af5eee2010-10-22 11:06:38 -040011166 * This function issues an abort iocb for the provided command iocb down to
11167 * the port. Other than the case the outstanding command iocb is an abort
11168 * request, this function issues abort out unconditionally. This function is
11169 * called with hbalock held. The function returns 0 when it fails due to
11170 * memory allocation failure or when the command iocb is an abort request.
Dick Kennedy88acb4d2020-05-01 14:43:07 -070011171 * The hbalock is asserted held in the code path calling this routine.
James Smarte59058c2008-08-24 21:49:00 -040011172 **/
James Smart5af5eee2010-10-22 11:06:38 -040011173static int
11174lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -050011175 struct lpfc_iocbq *cmdiocb)
dea31012005-04-17 16:05:31 -050011176{
James Smart2e0fef82007-06-17 19:56:36 -050011177 struct lpfc_vport *vport = cmdiocb->vport;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011178 struct lpfc_iocbq *abtsiocbp;
dea31012005-04-17 16:05:31 -050011179 IOCB_t *icmd = NULL;
11180 IOCB_t *iabt = NULL;
James Smart5af5eee2010-10-22 11:06:38 -040011181 int retval;
James Smart7e56aa22012-08-03 12:35:34 -040011182 unsigned long iflags;
James Smartfaa832e2018-07-31 17:23:18 -070011183 struct lpfc_nodelist *ndlp;
James Smart07951072007-04-25 09:51:38 -040011184
James Smart92d7f7b2007-06-17 19:56:38 -050011185 /*
11186 * There are certain command types we don't want to abort. And we
11187 * don't want to abort commands that are already in the process of
11188 * being aborted.
James Smart07951072007-04-25 09:51:38 -040011189 */
11190 icmd = &cmdiocb->iocb;
James Smart2e0fef82007-06-17 19:56:36 -050011191 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
James Smart92d7f7b2007-06-17 19:56:38 -050011192 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11193 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
James Smart07951072007-04-25 09:51:38 -040011194 return 0;
11195
dea31012005-04-17 16:05:31 -050011196 /* issue ABTS for this IOCB based on iotag */
James Smart92d7f7b2007-06-17 19:56:38 -050011197 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -050011198 if (abtsiocbp == NULL)
11199 return 0;
dea31012005-04-17 16:05:31 -050011200
James Smart07951072007-04-25 09:51:38 -040011201 /* This signals the response to set the correct status
James Smart341af102010-01-26 23:07:37 -050011202 * before calling the completion handler
James Smart07951072007-04-25 09:51:38 -040011203 */
11204 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11205
dea31012005-04-17 16:05:31 -050011206 iabt = &abtsiocbp->iocb;
James Smart07951072007-04-25 09:51:38 -040011207 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11208 iabt->un.acxri.abortContextTag = icmd->ulpContext;
James Smart45ed1192009-10-02 15:17:02 -040011209 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -040011210 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
James Smart45ed1192009-10-02 15:17:02 -040011211 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
James Smartfaa832e2018-07-31 17:23:18 -070011212 } else {
James Smartda0436e2009-05-22 14:51:39 -040011213 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
James Smartfaa832e2018-07-31 17:23:18 -070011214 if (pring->ringno == LPFC_ELS_RING) {
11215 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11216 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11217 }
11218 }
dea31012005-04-17 16:05:31 -050011219 iabt->ulpLe = 1;
James Smart07951072007-04-25 09:51:38 -040011220 iabt->ulpClass = icmd->ulpClass;
dea31012005-04-17 16:05:31 -050011221
James Smart5ffc2662009-11-18 15:39:44 -050011222 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080011223 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
James Smart341af102010-01-26 23:07:37 -050011224 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11225 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040011226 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11227 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
James Smart5ffc2662009-11-18 15:39:44 -050011228
James Smart2e0fef82007-06-17 19:56:36 -050011229 if (phba->link_state >= LPFC_LINK_UP)
James Smart07951072007-04-25 09:51:38 -040011230 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11231 else
11232 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11233
11234 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
James Smarte6c6acc2016-12-19 15:07:23 -080011235 abtsiocbp->vport = vport;
James Smart5b8bd0c2007-04-25 09:52:49 -040011236
James Smarte8b62012007-08-02 11:10:09 -040011237 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11238 "0339 Abort xri x%x, original iotag x%x, "
11239 "abort cmd iotag x%x\n",
James Smart2a9bf3d2010-06-07 15:24:45 -040011240 iabt->un.acxri.abortIoTag,
James Smarte8b62012007-08-02 11:10:09 -040011241 iabt->un.acxri.abortContextTag,
James Smart2a9bf3d2010-06-07 15:24:45 -040011242 abtsiocbp->iotag);
James Smart7e56aa22012-08-03 12:35:34 -040011243
11244 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -080011245 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11246 if (unlikely(pring == NULL))
James Smart9bd2bff52014-09-03 12:57:30 -040011247 return 0;
James Smart7e56aa22012-08-03 12:35:34 -040011248 /* Note: both hbalock and ring_lock need to be set here */
11249 spin_lock_irqsave(&pring->ring_lock, iflags);
11250 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11251 abtsiocbp, 0);
11252 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11253 } else {
11254 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11255 abtsiocbp, 0);
11256 }
James Smart07951072007-04-25 09:51:38 -040011257
James Smartd7c255b2008-08-24 21:50:00 -040011258 if (retval)
11259 __lpfc_sli_release_iocbq(phba, abtsiocbp);
James Smart5af5eee2010-10-22 11:06:38 -040011260
11261 /*
11262 * Caller to this routine should check for IOCB_ERROR
11263 * and handle it properly. This routine no longer removes
11264 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11265 */
11266 return retval;
11267}
11268
11269/**
11270 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11271 * @phba: Pointer to HBA context object.
11272 * @pring: Pointer to driver SLI ring object.
11273 * @cmdiocb: Pointer to driver command iocb object.
11274 *
11275 * This function issues an abort iocb for the provided command iocb. In case
11276 * of unloading, the abort iocb will not be issued to commands on the ELS
11277 * ring. Instead, the callback function shall be changed to those commands
11278 * so that nothing happens when them finishes. This function is called with
11279 * hbalock held. The function returns 0 when the command iocb is an abort
11280 * request.
11281 **/
11282int
11283lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11284 struct lpfc_iocbq *cmdiocb)
11285{
11286 struct lpfc_vport *vport = cmdiocb->vport;
11287 int retval = IOCB_ERROR;
11288 IOCB_t *icmd = NULL;
11289
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +010011290 lockdep_assert_held(&phba->hbalock);
11291
James Smart5af5eee2010-10-22 11:06:38 -040011292 /*
11293 * There are certain command types we don't want to abort. And we
11294 * don't want to abort commands that are already in the process of
11295 * being aborted.
11296 */
11297 icmd = &cmdiocb->iocb;
11298 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11299 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11300 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11301 return 0;
11302
Dick Kennedy1234a6d2017-09-29 17:34:29 -070011303 if (!pring) {
11304 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11305 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11306 else
11307 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11308 goto abort_iotag_exit;
11309 }
11310
James Smart5af5eee2010-10-22 11:06:38 -040011311 /*
11312 * If we're unloading, don't abort iocb on the ELS ring, but change
11313 * the callback so that nothing happens when it finishes.
11314 */
11315 if ((vport->load_flag & FC_UNLOADING) &&
11316 (pring->ringno == LPFC_ELS_RING)) {
11317 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11318 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11319 else
11320 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11321 goto abort_iotag_exit;
11322 }
11323
11324 /* Now, we try to issue the abort to the cmdiocb out */
11325 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11326
James Smart07951072007-04-25 09:51:38 -040011327abort_iotag_exit:
James Smart2e0fef82007-06-17 19:56:36 -050011328 /*
11329 * Caller to this routine should check for IOCB_ERROR
11330 * and handle it properly. This routine no longer removes
11331 * iocb off txcmplq and call compl in case of IOCB_ERROR.
James Smart07951072007-04-25 09:51:38 -040011332 */
James Smart2e0fef82007-06-17 19:56:36 -050011333 return retval;
dea31012005-04-17 16:05:31 -050011334}
11335
James Smarte59058c2008-08-24 21:49:00 -040011336/**
James Smart5af5eee2010-10-22 11:06:38 -040011337 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11338 * @phba: pointer to lpfc HBA data structure.
11339 *
11340 * This routine will abort all pending and outstanding iocbs to an HBA.
11341 **/
11342void
11343lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11344{
11345 struct lpfc_sli *psli = &phba->sli;
11346 struct lpfc_sli_ring *pring;
James Smart895427b2017-02-12 13:52:30 -080011347 struct lpfc_queue *qp = NULL;
James Smart5af5eee2010-10-22 11:06:38 -040011348 int i;
11349
James Smart895427b2017-02-12 13:52:30 -080011350 if (phba->sli_rev != LPFC_SLI_REV4) {
11351 for (i = 0; i < psli->num_rings; i++) {
11352 pring = &psli->sli3_ring[i];
11353 lpfc_sli_abort_iocb_ring(phba, pring);
11354 }
11355 return;
11356 }
11357 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11358 pring = qp->pring;
11359 if (!pring)
11360 continue;
James Smartdb55fba2014-04-04 13:52:02 -040011361 lpfc_sli_abort_iocb_ring(phba, pring);
James Smart5af5eee2010-10-22 11:06:38 -040011362 }
11363}
11364
11365/**
James Smart3621a712009-04-06 18:47:14 -040011366 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
James Smarte59058c2008-08-24 21:49:00 -040011367 * @iocbq: Pointer to driver iocb object.
11368 * @vport: Pointer to driver virtual port object.
11369 * @tgt_id: SCSI ID of the target.
11370 * @lun_id: LUN ID of the scsi device.
11371 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11372 *
James Smart3621a712009-04-06 18:47:14 -040011373 * This function acts as an iocb filter for functions which abort or count
James Smarte59058c2008-08-24 21:49:00 -040011374 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11375 * 0 if the filtering criteria is met for the given iocb and will return
11376 * 1 if the filtering criteria is not met.
11377 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11378 * given iocb is for the SCSI device specified by vport, tgt_id and
11379 * lun_id parameter.
11380 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11381 * given iocb is for the SCSI target specified by vport and tgt_id
11382 * parameters.
11383 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11384 * given iocb is for the SCSI host associated with the given vport.
11385 * This function is called with no locks held.
11386 **/
dea31012005-04-17 16:05:31 -050011387static int
James Smart51ef4c22007-08-02 11:10:31 -040011388lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11389 uint16_t tgt_id, uint64_t lun_id,
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011390 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050011391{
James Smartc4908502019-01-28 11:14:28 -080011392 struct lpfc_io_buf *lpfc_cmd;
dea31012005-04-17 16:05:31 -050011393 int rc = 1;
11394
James Smartb0e83012018-06-26 08:24:29 -070011395 if (iocbq->vport != vport)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011396 return rc;
11397
James Smartb0e83012018-06-26 08:24:29 -070011398 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11399 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
James Smart51ef4c22007-08-02 11:10:31 -040011400 return rc;
11401
James Smartc4908502019-01-28 11:14:28 -080011402 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011403
James Smart495a7142008-06-14 22:52:59 -040011404 if (lpfc_cmd->pCmd == NULL)
dea31012005-04-17 16:05:31 -050011405 return rc;
11406
11407 switch (ctx_cmd) {
11408 case LPFC_CTX_LUN:
James Smartb0e83012018-06-26 08:24:29 -070011409 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
James Smart495a7142008-06-14 22:52:59 -040011410 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11411 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea31012005-04-17 16:05:31 -050011412 rc = 0;
11413 break;
11414 case LPFC_CTX_TGT:
James Smartb0e83012018-06-26 08:24:29 -070011415 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
James Smart495a7142008-06-14 22:52:59 -040011416 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea31012005-04-17 16:05:31 -050011417 rc = 0;
11418 break;
dea31012005-04-17 16:05:31 -050011419 case LPFC_CTX_HOST:
11420 rc = 0;
11421 break;
11422 default:
11423 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -070011424 __func__, ctx_cmd);
dea31012005-04-17 16:05:31 -050011425 break;
11426 }
11427
11428 return rc;
11429}
11430
James Smarte59058c2008-08-24 21:49:00 -040011431/**
James Smart3621a712009-04-06 18:47:14 -040011432 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
James Smarte59058c2008-08-24 21:49:00 -040011433 * @vport: Pointer to virtual port.
11434 * @tgt_id: SCSI ID of the target.
11435 * @lun_id: LUN ID of the scsi device.
11436 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11437 *
11438 * This function returns number of FCP commands pending for the vport.
11439 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11440 * commands pending on the vport associated with SCSI device specified
11441 * by tgt_id and lun_id parameters.
11442 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11443 * commands pending on the vport associated with SCSI target specified
11444 * by tgt_id parameter.
11445 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11446 * commands pending on the vport.
11447 * This function returns the number of iocbs which satisfy the filter.
11448 * This function is called without any lock held.
11449 **/
dea31012005-04-17 16:05:31 -050011450int
James Smart51ef4c22007-08-02 11:10:31 -040011451lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11452 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050011453{
James Smart51ef4c22007-08-02 11:10:31 -040011454 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011455 struct lpfc_iocbq *iocbq;
11456 int sum, i;
dea31012005-04-17 16:05:31 -050011457
Johannes Thumshirn31979002016-07-18 16:06:03 +020011458 spin_lock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011459 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11460 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -050011461
James Smart51ef4c22007-08-02 11:10:31 -040011462 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11463 ctx_cmd) == 0)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011464 sum++;
dea31012005-04-17 16:05:31 -050011465 }
Johannes Thumshirn31979002016-07-18 16:06:03 +020011466 spin_unlock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011467
dea31012005-04-17 16:05:31 -050011468 return sum;
11469}
11470
James Smarte59058c2008-08-24 21:49:00 -040011471/**
James Smart3621a712009-04-06 18:47:14 -040011472 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
James Smarte59058c2008-08-24 21:49:00 -040011473 * @phba: Pointer to HBA context object
11474 * @cmdiocb: Pointer to command iocb object.
11475 * @rspiocb: Pointer to response iocb object.
11476 *
11477 * This function is called when an aborted FCP iocb completes. This
11478 * function is called by the ring event handler with no lock held.
11479 * This function frees the iocb.
11480 **/
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040011481void
James Smart2e0fef82007-06-17 19:56:36 -050011482lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11483 struct lpfc_iocbq *rspiocb)
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040011484{
James Smartcb69f7d2011-12-13 13:21:57 -050011485 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart8e668af2013-05-31 17:04:28 -040011486 "3096 ABORT_XRI_CN completing on rpi x%x "
James Smartcb69f7d2011-12-13 13:21:57 -050011487 "original iotag x%x, abort cmd iotag x%x "
11488 "status 0x%x, reason 0x%x\n",
11489 cmdiocb->iocb.un.acxri.abortContextTag,
11490 cmdiocb->iocb.un.acxri.abortIoTag,
11491 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11492 rspiocb->iocb.un.ulpWord[4]);
James Bottomley604a3e32005-10-29 10:28:33 -050011493 lpfc_sli_release_iocbq(phba, cmdiocb);
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040011494 return;
11495}
11496
/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object (used for the SLI3 issue path).
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_sli_ring *pring_s4;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH)
		return errcnt;

	/* Walk the whole iotag lookup table; the filter below selects
	 * only the FCP iocbs that belong to this vport/target/lun.
	 * NOTE(review): the table is walked without holding hbalock here -
	 * presumably safe in the contexts this is called from; confirm.
	 */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			/* Allocation failure counts as a failed abort */
			errcnt++;
			continue;
		}

		/* indicate the IO is being aborted by the driver. */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		/* Build the ABTS request from the command being aborted */
		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		/* SLI4 aborts by XRI; SLI3 aborts by iotag */
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocb->iocb_flag |= LPFC_IO_FOF;

		/* ABTS on the wire only when the link is up; otherwise just
		 * close the exchange locally.
		 */
		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
			/* NOTE(review): on a NULL ring this skips without
			 * releasing abtsiocb or counting the failure -
			 * looks like a leak; confirm against later fixes.
			 */
			if (!pring_s4)
				continue;
			ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						      abtsiocb, 0);
		} else
			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
						      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
11601
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object (used for the SLI3 issue path).
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters
 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 *
 * Lock nesting used below: hbalock (irqsave, outermost) -> per-command
 * buf_lock -> per-ring ring_lock (SLI4 only).  Every early 'continue'
 * must unwind the inner locks in reverse order.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4 = NULL;

	spin_lock_irqsave(&phba->hbalock, iflags);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/* Guard against IO completion being called at same time */
		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		spin_lock(&lpfc_cmd->buf_lock);

		/* Command already completed/released its midlayer cmd */
		if (!lpfc_cmd->pCmd) {
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 =
			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
			if (!pring_s4) {
				spin_unlock(&lpfc_cmd->buf_lock);
				continue;
			}
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock(&pring_s4->ring_lock);
		}

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
		    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (!abtsiocbq) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		/* Build the ABTS request from the command being aborted */
		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		/* SLI4 aborts by XRI; SLI3 aborts by iotag */
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		/* NOTE(review): assumes lpfc_cmd->rdata is non-NULL here
		 * (pCmd was checked above) - confirm.
		 */
		ndlp = lpfc_cmd->rdata->pnode;

		/* ABTS on the wire only with link up and a mapped node;
		 * otherwise just close the exchange locally.
		 */
		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock(&pring_s4->ring_lock);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		spin_unlock(&lpfc_cmd->buf_lock);

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return sum;
}
11750
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_io_buf *lpfc_cmd;

	/* hbalock serializes against lpfc_sli_issue_iocb_wait, which sets
	 * LPFC_IO_WAKE_TMO under the same lock when the waiter times out.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		/* Restore the caller's original completion routine that was
		 * saved in wait_iocb_cmpl before the synchronous issue.
		 */
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	/* Normal (non-timed-out) completion: copy the response for the
	 * waiter, then wake it. The waiter tests LPFC_IO_WAKE under hbalock.
	 */
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
					cur_iocbq);
		if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
		else
			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
11818
James Smarte59058c2008-08-24 21:49:00 -040011819/**
James Smartd11e31d2009-06-10 17:23:06 -040011820 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11821 * @phba: Pointer to HBA context object..
11822 * @piocbq: Pointer to command iocb.
11823 * @flag: Flag to test.
11824 *
11825 * This routine grabs the hbalock and then test the iocb_flag to
11826 * see if the passed in flag is set.
11827 * Returns:
11828 * 1 if flag is set.
11829 * 0 if flag is not set.
11830 **/
11831static int
11832lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11833 struct lpfc_iocbq *piocbq, uint32_t flag)
11834{
11835 unsigned long iflags;
11836 int ret;
11837
11838 spin_lock_irqsave(&phba->hbalock, iflags);
11839 ret = piocbq->iocb_flag & flag;
11840 spin_unlock_irqrestore(&phba->hbalock, iflags);
11841 return ret;
11842
11843}
11844
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which to issue the iocb.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using an
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the iocb completions occur while
 * this function sleep. So, this function cannot be called from
 * the thread which process iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	/* SLI4 computes the ring from the iocb itself; SLI3 uses the
	 * caller-supplied ring number directly.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or its an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	/* Save the caller's completion routine so the wake handler can
	 * invoke it on timeout, then install our own wake callback.
	 */
	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		/* Polling mode: temporarily re-enable FCP ring interrupts
		 * so this synchronous command can complete.
		 */
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		/* Re-check LPFC_IO_WAKE under hbalock: the completion may
		 * race with the wait timing out; setting LPFC_IO_WAKE_TMO
		 * under the same lock tells the wake handler we gave up.
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out. Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 * */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			/* Wait returned early without LPFC_IO_WAKE set */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			/* Diagnostic only: count queued/outstanding iocbs */
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		/* NOTE(review): IOCB_BUSY returns without restoring the
		 * polling-mode interrupt mask or clearing context2 /
		 * wait_queue below — presumably the caller retries; confirm.
		 */
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		/* Restore polling mode: mask FCP ring interrupts again */
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	/* Drop references to the on-stack wait queue before returning */
	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012000
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleep. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* setup context3 field to pass wait_queue pointer to wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		/* Clear context3 under hbalock so the wake routine cannot
		 * complete a completion that is about to go out of scope.
		 */
		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			/* Hand cleanup to the default completion handler when
			 * the command eventually finishes in firmware.
			 */
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}
12064
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: LPFC_MBX_NO_WAIT to flush immediately, otherwise wait for
 *              the active mailbox command to drain before flushing.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system is in a block state to prevent
 * the asynchronous mailbox command from issued off the pending mailbox
 * command queue. If the mailbox command sub-system shutdown is due to
 * HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	/* Default drain deadline; may be extended below based on the
	 * active command's own timeout value.
	 */
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine to
				 * forcefully release active mailbox command
				 */
				break;
		}
	} else {
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
	}

	lpfc_sli_mbox_sys_flush(phba);
}
12131
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register; a failed read means the
	 * device has been unplugged/surprise-removed.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if there is a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);	/* flush the write */
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Register read failed: report the unplug as an error attention */
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
12184
James Smarte59058c2008-08-24 21:49:00 -040012185/**
James Smartda0436e2009-05-22 14:51:39 -040012186 * lpfc_sli4_eratt_read - read sli-4 error attention events
12187 * @phba: Pointer to HBA context.
12188 *
12189 * This function is called to read the SLI4 device error attention registers
12190 * for possible error attention events. The caller must hold the hostlock
12191 * with spin_lock_irq().
12192 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030012193 * This function returns 1 when there is Error Attention in the Host Attention
James Smartda0436e2009-05-22 14:51:39 -040012194 * Register and returns 0 otherwise.
12195 **/
12196static int
12197lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12198{
12199 uint32_t uerr_sta_hi, uerr_sta_lo;
James Smart2fcee4b2010-12-15 17:57:46 -050012200 uint32_t if_type, portsmphr;
12201 struct lpfc_register portstat_reg;
James Smartda0436e2009-05-22 14:51:39 -040012202
James Smart2fcee4b2010-12-15 17:57:46 -050012203 /*
12204 * For now, use the SLI4 device internal unrecoverable error
James Smartda0436e2009-05-22 14:51:39 -040012205 * registers for error attention. This can be changed later.
12206 */
James Smart2fcee4b2010-12-15 17:57:46 -050012207 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12208 switch (if_type) {
12209 case LPFC_SLI_INTF_IF_TYPE_0:
James Smart9940b972011-03-11 16:06:12 -050012210 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12211 &uerr_sta_lo) ||
12212 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12213 &uerr_sta_hi)) {
12214 phba->work_hs |= UNPLUG_ERR;
12215 phba->work_ha |= HA_ERATT;
12216 phba->hba_flag |= HBA_ERATT_HANDLED;
12217 return 1;
12218 }
James Smart2fcee4b2010-12-15 17:57:46 -050012219 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12220 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12222 "1423 HBA Unrecoverable error: "
12223 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12224 "ue_mask_lo_reg=0x%x, "
12225 "ue_mask_hi_reg=0x%x\n",
12226 uerr_sta_lo, uerr_sta_hi,
12227 phba->sli4_hba.ue_mask_lo,
12228 phba->sli4_hba.ue_mask_hi);
12229 phba->work_status[0] = uerr_sta_lo;
12230 phba->work_status[1] = uerr_sta_hi;
12231 phba->work_ha |= HA_ERATT;
12232 phba->hba_flag |= HBA_ERATT_HANDLED;
12233 return 1;
12234 }
12235 break;
12236 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart27d6ac02018-02-22 08:18:42 -080012237 case LPFC_SLI_INTF_IF_TYPE_6:
James Smart9940b972011-03-11 16:06:12 -050012238 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12239 &portstat_reg.word0) ||
12240 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12241 &portsmphr)){
12242 phba->work_hs |= UNPLUG_ERR;
12243 phba->work_ha |= HA_ERATT;
12244 phba->hba_flag |= HBA_ERATT_HANDLED;
12245 return 1;
12246 }
James Smart2fcee4b2010-12-15 17:57:46 -050012247 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12248 phba->work_status[0] =
12249 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12250 phba->work_status[1] =
12251 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12252 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart2e90f4b2011-12-13 13:22:37 -050012253 "2885 Port Status Event: "
James Smart2fcee4b2010-12-15 17:57:46 -050012254 "port status reg 0x%x, "
12255 "port smphr reg 0x%x, "
12256 "error 1=0x%x, error 2=0x%x\n",
12257 portstat_reg.word0,
12258 portsmphr,
12259 phba->work_status[0],
12260 phba->work_status[1]);
12261 phba->work_ha |= HA_ERATT;
12262 phba->hba_flag |= HBA_ERATT_HANDLED;
12263 return 1;
12264 }
12265 break;
12266 case LPFC_SLI_INTF_IF_TYPE_1:
12267 default:
James Smarta747c9c2009-11-18 15:41:10 -050012268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart2fcee4b2010-12-15 17:57:46 -050012269 "2886 HBA Error Attention on unsupported "
12270 "if type %d.", if_type);
James Smarta747c9c2009-11-18 15:41:10 -050012271 return 1;
James Smartda0436e2009-05-22 14:51:39 -040012272 }
James Smart2fcee4b2010-12-15 17:57:46 -050012273
James Smartda0436e2009-05-22 14:51:39 -040012274 return 0;
12275}
12276
/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events. It dispatches
 * to the SLI-revision-specific read routine under hbalock.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* Dispatch to the revision-specific error attention reader;
	 * both readers expect hbalock to be held.
	 */
	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Uncoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
12342
12343/**
12344 * lpfc_intr_state_check - Check device state for interrupt handling
12345 * @phba: Pointer to HBA context.
12346 *
12347 * This inline routine checks whether a device or its PCI slot is in a state
12348 * that the interrupt should be handled.
12349 *
12350 * This function returns 0 if the device or the PCI slot is in a state that
12351 * interrupt should be handled, otherwise -EIO.
12352 */
12353static inline int
12354lpfc_intr_state_check(struct lpfc_hba *phba)
12355{
12356 /* If the pci channel is offline, ignore all the interrupts */
12357 if (unlikely(pci_channel_offline(phba->pcidev)))
12358 return -EIO;
12359
12360 /* Update device level interrupt statistics */
12361 phba->sli.slistat.sli_intr++;
12362
12363 /* Ignore all interrupts during initialization. */
12364 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12365 return -EIO;
12366
James Smart93996272008-08-24 21:50:30 -040012367 return 0;
12368}
12369
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * Return: IRQ_HANDLED when the interrupt (or an unplug error) is handled,
 * IRQ_NONE otherwise.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attented to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path:
		 * temporarily mask the slow-path enables in HC, ack the
		 * slow-path attention bits in HA, then restore HC.
		 */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		/* MSI / INTx: device-level handler already captured HA */
		ha_copy = phba->ha_copy;

	/* Only the attention sources this handler is responsible for */
	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				/* LA processing disabled: drop the event */
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					/* Disable the ELS ring interrupt; the
					 * worker thread re-enables it after
					 * servicing the ring.
					 */
					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		/* hbalock is held from here until either the stray-mailbox
		 * path, the mailbox-completion path, or the else branch below
		 * releases it.
		 */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					/* Copy any extended mailbox response
					 * data out to the caller's buffer.
					 */
					if (pmb->out_ext_byte_len &&
						pmb->ctx_buf)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->ctx_buf,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->ctx_buf);
						ndlp = (struct lpfc_nodelist *)
							pmb->ctx_ndlp;

						/* Reg_LOGIN of dflt RPI was
						 * successful. new lets get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->ctx_buf = mp;
						pmb->ctx_ndlp = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have"
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		/* Hand the remaining attention bits to the worker thread */
		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	/* Register read failed: assume the adapter was unplugged */
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */
James Smart93996272008-08-24 21:50:30 -040012671
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring event are handled in
 * the intrrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * Return: IRQ_HANDLED when the interrupt is handled, IRQ_NONE otherwise.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attented to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		/* MSI / INTx: device-level handler already captured HA */
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 * Slow-path attention bits are masked off here; they belong to the
	 * slow-path handler.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */
dea31012005-04-17 16:05:31 -050012762
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * Return: IRQ_HANDLED when the interrupt is handled, IRQ_NONE otherwise.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	/* Capture HA once for both sub-handlers; they read phba->ha_copy */
	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions:
	 * mask all HC interrupt enables, ack the HA bits, restore HC.
	 */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invokes slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
James Smart4f774512009-05-22 14:52:35 -040012879
12880/**
James Smart4f774512009-05-22 14:52:35 -040012881 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12882 * @phba: pointer to lpfc hba data structure.
12883 *
12884 * This routine is invoked by the worker thread to process all the pending
12885 * SLI4 els abort xri events.
12886 **/
12887void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12888{
12889 struct lpfc_cq_event *cq_event;
12890
12891 /* First, declare the els xri abort event has been handled */
12892 spin_lock_irq(&phba->hbalock);
12893 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12894 spin_unlock_irq(&phba->hbalock);
12895 /* Now, handle all the els xri abort events */
12896 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12897 /* Get the first event from the head of the event queue */
12898 spin_lock_irq(&phba->hbalock);
12899 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12900 cq_event, struct lpfc_cq_event, list);
12901 spin_unlock_irq(&phba->hbalock);
12902 /* Notify aborted XRI for ELS work queue */
12903 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12904 /* Free the event processed back to the free pool */
12905 lpfc_sli4_cq_event_release(phba, cq_event);
12906 }
12907}
12908
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	/* Bulk-copy everything from the iocb member onward; fields before
	 * 'iocb' in struct lpfc_iocbq are deliberately left untouched.
	 */
	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	/* NOTE: the braceless if below relies on dangling-else binding; the
	 * 'else {' arm pairs with the LPFC_IO_FCP test (non-FCP path).
	 */
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		/* Derive the largest legal response size from the command's
		 * BDEs so total_data_placed can be clamped below.
		 */
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			/* bpl[1] is the response BDE for an ELS request */
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			/* Sum the sizes of all response (non-64) BDEs */
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
13024
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the response iocbq carrying the WCQE in its
 *             cq_event; faked up into a pseudo response on success.
 *
 * This routine handles an ELS work-queue completion event and construct
 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise (no ELS ring, or
 * no command IOCB matches the WCQE request tag; in the latter case
 * @irspiocbq is released back to the pool).
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	spin_lock_irqsave(&pring->ring_lock, iflags);
	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
13074
/**
 * lpfc_cq_event_setup - Allocate a CQ_EVENT entry and copy a CQE into it
 * @phba: Pointer to HBA context object.
 * @entry: Pointer to the completion queue entry to copy.
 * @size: Number of bytes of @entry to copy into the event.
 *
 * Allocates an internal CQ_EVENT entry and copies @size bytes of the
 * given CQE into it so the entry can be queued for deferred processing.
 *
 * Return: Pointer to the new CQ_EVENT entry, or NULL if allocation failed.
 **/
inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}
13092
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry carrying an
 * asynchronous event: the MCQE is copied into a CQ_EVENT entry, queued on
 * the slow-path async work queue, and the ASYNC_EVENT flag is set so the
 * worker thread will process it.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	/* Copy the MCQE into a freshly allocated CQ_EVENT entry */
	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;
	/* Queue the event and flag it under hbalock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
13125
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with mailbox
 * completion event: it completes the active mailbox command, handles the
 * LPFC_MBX_IMED_UNREG special case, and releases the mailbox posting
 * token so the next pending command can be issued.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			/* Save the buffer/node context so the reused mbox
			 * keeps them across the unreg_login reissue.
			 */
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * RID of the PPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	/* Consume the MQ entry (if flagged) even though no completion ran */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return false;
}
13245
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue the entry was taken from.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Account the mailbox completion against this CQ's stats */
	cq->CQ_mbox++;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}
13276
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event: the WCQE is
 * saved into a newly acquired iocbq and queued on the slow-path event
 * list for the worker thread to process.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		/* Out of iocbqs: report queue depths for diagnosis */
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
13333
13334/**
13335 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13336 * @phba: Pointer to HBA context object.
13337 * @wcqe: Pointer to work-queue completion queue entry.
13338 *
Masahiro Yamada3f8b6fb2017-02-27 14:29:25 -080013339 * This routine handles slow-path WQ entry consumed event by invoking the
James Smart4f774512009-05-22 14:52:35 -040013340 * proper WQ release routine to the slow-path WQ.
13341 **/
13342static void
13343lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13344 struct lpfc_wcqe_release *wcqe)
13345{
James Smart2e90f4b2011-12-13 13:22:37 -050013346 /* sanity check on queue memory */
13347 if (unlikely(!phba->sli4_hba.els_wq))
13348 return;
James Smart4f774512009-05-22 14:52:35 -040013349 /* Check for the slow-path ELS work queue */
13350 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13351 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13352 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13353 else
13354 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13355 "2579 Slow-path wqe consume event carries "
13356 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13357 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13358 phba->sli4_hba.els_wq->queue_id);
13359}
13360
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event. IO-subtype aborts are handled
 * inline; ELS/NVME-LS aborts are queued for the worker thread.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_IO:
		/* IO aborts are processed directly, no worker needed */
		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Notify aborted XRI for NVME work queue */
			if (phba->nvmet_support)
				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		}
		workposted = false;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		cq_event->hdwq = cq->hdwq;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}
13416
/* R_CTL value used to recognize MDS diagnostic/loopback frames in
 * lpfc_sli4_sp_handle_rcqe(); presumably not defined in the standard
 * FC headers — TODO confirm against the SLI-4 spec.
 */
#define FC_RCTL_MDS_DIAGS	0xF4
13418
/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry: on a
 * successful (or truncated) receive, the frame buffer is pulled off the
 * HBQ and queued for the worker thread, except MDS loopback frames which
 * are handled inline. Buffer-shortage statuses request a buffer repost.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	/* The RQ id field moved in the V1 CQE layout */
	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		/* Frame discarded for lack of buffers; dump NVMET state */
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fallthrough */

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
13514
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive queue
 * completion queue entry, dispatching on the CQE code to the matching
 * handler.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}
13570
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to the slow-path event queue the entry came from.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue: it resolves the CQ id carried in the EQE to one of @speq's child
 * completion queues and schedules that CQ's work item for deferred
 * processing.
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
	struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Search the EQ's children for the CQ with this id */
	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		/* Only complain while SLI is active; otherwise stale EQEs
		 * during teardown would spam the log.
		 */
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	/* Defer CQE processing to the CQ's work item on its channel CPU */
	if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0390 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}
13617
/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed
 * @handler: Routine to process each cqe
 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set. A delay of 0
 * indicates no rescheduling.
 *
 * Returns True if work scheduled, False otherwise.
 **/
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
			struct lpfc_cqe *), unsigned long *delay)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	/* Claim the CQ; if another context owns it, just rearm and leave */
	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cq->q_flag = 0;
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		/* Stop once the per-invocation processing limit is hit */
		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		/* Periodically ring the doorbell (no rearm) so the
		 * hardware learns of consumed entries mid-loop.
		 */
		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
						LPFC_QUEUE_NOARM);
			consumed = 0;
			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
		}

		if (count == LPFC_NVMET_CQ_NOTIFY)
			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;

		cqe = lpfc_sli4_cq_get(cq);
	}
	/* Heavy load: poll again after a delay instead of rearming */
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	/* Release the CQ claim before the final doorbell write */
	xchg(&cq->queue_claimed, 0);

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}
13702
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @cq: pointer to CQ to process
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wakeup the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay);
		break;
	case LPFC_WCQ:
		/* IO-subtype WCQs use the fast-path CQE handler */
		if (cq->subtype == LPFC_IO)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Under load: reschedule ourselves instead of rearming now */
	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_spwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
13762
13763/**
James Smart32517fc2019-01-28 11:14:33 -080013764 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13765 * interrupt
13766 * @work: pointer to work element
13767 *
13768 * translates from the work handler and calls the slow-path handler.
13769 **/
13770static void
13771lpfc_sli4_sp_process_cq(struct work_struct *work)
13772{
13773 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13774
13775 __lpfc_sli4_sp_process_cq(cq);
13776}
13777
13778/**
13779 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13780 * @work: pointer to work element
13781 *
13782 * translates from the work handler and calls the slow-path handler.
13783 **/
13784static void
13785lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13786{
13787 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13788 struct lpfc_queue, sched_spwork);
13789
13790 __lpfc_sli4_sp_process_cq(cq);
13791}
13792
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine process a fast-path work queue completion entry from fast-path
 * event queue for FCP command response completion.  It looks up the command
 * iocb by the request tag in the WCQE and either calls its wqe_cmpl handler
 * directly with the WCQE, or fakes an IOCB response and calls iocb_cmpl.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;	/* stack-local pseudo response iocb */
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB.
	 * Note: ring_lock is held only for the stats bump; the tag lookup
	 * itself runs unlocked here.
	 */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			/* Clear the driver-abort marker under hbalock before
			 * completing, so the completion sees a clean flag.
			 */
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
13878
13879/**
13880 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13881 * @phba: Pointer to HBA context object.
13882 * @cq: Pointer to completion queue.
13883 * @wcqe: Pointer to work-queue completion queue entry.
13884 *
Masahiro Yamada3f8b6fb2017-02-27 14:29:25 -080013885 * This routine handles an fast-path WQ entry consumed event by invoking the
James Smart4f774512009-05-22 14:52:35 -040013886 * proper WQ release routine to the slow-path WQ.
13887 **/
13888static void
13889lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13890 struct lpfc_wcqe_release *wcqe)
13891{
13892 struct lpfc_queue *childwq;
13893 bool wqid_matched = false;
James Smart895427b2017-02-12 13:52:30 -080013894 uint16_t hba_wqid;
James Smart4f774512009-05-22 14:52:35 -040013895
13896 /* Check for fast-path FCP work queue release */
James Smart895427b2017-02-12 13:52:30 -080013897 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
James Smart4f774512009-05-22 14:52:35 -040013898 list_for_each_entry(childwq, &cq->child_list, list) {
James Smart895427b2017-02-12 13:52:30 -080013899 if (childwq->queue_id == hba_wqid) {
James Smart4f774512009-05-22 14:52:35 -040013900 lpfc_sli4_wq_release(childwq,
13901 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
James Smart6e8e1c12018-01-30 15:58:49 -080013902 if (childwq->q_flag & HBA_NVMET_WQFULL)
13903 lpfc_nvmet_wqfull_process(phba, childwq);
James Smart4f774512009-05-22 14:52:35 -040013904 wqid_matched = true;
13905 break;
13906 }
13907 }
13908 /* Report warning log message if no match found */
13909 if (wqid_matched != true)
13910 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13911 "2580 Fast-path wqe consume event carries "
James Smart895427b2017-02-12 13:52:30 -080013912 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
James Smart4f774512009-05-22 14:52:35 -040013913}
13914
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue the RCQE arrived on.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine process a receive-queue completion queue entry.  On a
 * successful receive of an FCP-type frame it hands the buffer to the NVMET
 * unsolicited-event path (which then owns the buffer); otherwise the buffer
 * is freed or buffer-shortage counters are bumped.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	/* Nothing to do unless NVMET and its CQ set are configured */
	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	/* Map this CQ to its header/data MRQ pair by offset from cqset[0] */
	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			/* Buffer ownership passes to the NVMET layer here */
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fallthrough */

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}
14021
/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine process a fast-path work queue completion entry from fast-path
 * event queue for FCP command response completion.  The CQE is copied to a
 * local (endian-corrected) buffer and dispatched on its code field.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;	/* local copy, reinterpreted per code */
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
14083
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue the EQE arrived on.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine process a event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine this is for a
 * completion event on a completion queue, if not, an error shall be logged
 * and just return. Otherwise, it will get to the corresponding completion
 * queue and schedule its processing (the CQ is drained from a workqueue
 * item, not inline here).
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	/* Stamp ISR entry time for latency accounting when enabled */
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}
14168
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
 * @cq: Pointer to CQ to be processed
 *
 * This routine calls the cq processing routine with the handler for
 * fast path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wakeup the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay);

	/* Non-zero delay: requeue ourselves instead of rearming now */
	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_irqwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
14208
James Smart1ba981f2014-02-20 09:56:45 -050014209/**
James Smart32517fc2019-01-28 11:14:33 -080014210 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14211 * interrupt
14212 * @work: pointer to work element
James Smart1ba981f2014-02-20 09:56:45 -050014213 *
James Smart32517fc2019-01-28 11:14:33 -080014214 * translates from the work handler and calls the fast-path handler.
James Smart1ba981f2014-02-20 09:56:45 -050014215 **/
14216static void
James Smart32517fc2019-01-28 11:14:33 -080014217lpfc_sli4_hba_process_cq(struct work_struct *work)
James Smart1ba981f2014-02-20 09:56:45 -050014218{
James Smart32517fc2019-01-28 11:14:33 -080014219 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
James Smart1ba981f2014-02-20 09:56:45 -050014220
James Smart32517fc2019-01-28 11:14:33 -080014221 __lpfc_sli4_hba_process_cq(cq);
James Smart1ba981f2014-02-20 09:56:45 -050014222}
14223
/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}
14238
/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba_eq_hdl for this vector).
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring event are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that,
 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
 * equal to that of FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;
	uint32_t icnt;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eqcq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/* Track per-CPU interrupt rate for the EQ delay heuristic below */
	eqi = phba->sli4_hba.eq_info;
	icnt = this_cpu_inc_return(eqi->icnt);
	fpeq->last_cpu = raw_smp_processor_id();

	/* High interrupt rate: back off by raising the EQ delay to max */
	if (icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
14330
14331/**
14332 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14333 * @irq: Interrupt number.
14334 * @dev_id: The device context pointer.
14335 *
14336 * This function is the device-level interrupt handler to device with SLI-4
14337 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14338 * interrupt mode is enabled and there is an event in the HBA which requires
14339 * driver attention. This function invokes the slow-path interrupt attention
14340 * handling function and fast-path interrupt attention handling function in
14341 * turn to process the relevant HBA attention events. This function is called
14342 * without any lock held. It gets the hbalock to access and update SLI data
14343 * structures.
14344 *
14345 * This function returns IRQ_HANDLED when interrupt is handled, else it
14346 * returns IRQ_NONE.
14347 **/
14348irqreturn_t
14349lpfc_sli4_intr_handler(int irq, void *dev_id)
14350{
14351 struct lpfc_hba *phba;
James Smart67d12732012-08-03 12:36:13 -040014352 irqreturn_t hba_irq_rc;
14353 bool hba_handled = false;
James Smart895427b2017-02-12 13:52:30 -080014354 int qidx;
James Smart4f774512009-05-22 14:52:35 -040014355
14356 /* Get the driver's phba structure from the dev_id */
14357 phba = (struct lpfc_hba *)dev_id;
14358
14359 if (unlikely(!phba))
14360 return IRQ_NONE;
14361
14362 /*
James Smart4f774512009-05-22 14:52:35 -040014363 * Invoke fast-path host attention interrupt handling as appropriate.
14364 */
James Smart6a828b02019-01-28 11:14:31 -080014365 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
James Smart67d12732012-08-03 12:36:13 -040014366 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
James Smart895427b2017-02-12 13:52:30 -080014367 &phba->sli4_hba.hba_eq_hdl[qidx]);
James Smart67d12732012-08-03 12:36:13 -040014368 if (hba_irq_rc == IRQ_HANDLED)
14369 hba_handled |= true;
James Smart4f774512009-05-22 14:52:35 -040014370 }
14371
James Smart67d12732012-08-03 12:36:13 -040014372 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040014373} /* lpfc_sli4_intr_handler */
14374
/**
 * lpfc_sli4_poll_hbtimer - Heartbeat timer for EQs on the adapter poll list
 * @t: the timer that fired (embedded as phba->cpuhp_poll_timer)
 *
 * Under RCU, polls every EQ currently on the adapter's poll_list and,
 * if the list is still non-empty, re-arms the timer LPFC_POLL_HB msecs out.
 **/
void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
	struct lpfc_queue *eq;
	int i = 0;	/* aggregate EQE count; currently only accumulated */

	rcu_read_lock();

	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
		i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();
}
14391
/**
 * lpfc_sli4_poll_eq - Poll one EQ if it is in polling mode
 * @eq: the event queue to poll
 * @path: caller's path identifier (e.g. LPFC_POLL_SLOWPATH); not used in
 *        the body here — presumably kept for the call signature/tracing;
 *        TODO(review): confirm against callers.
 *
 * Return: number of EQEs processed (0 when the EQ is not in poll mode).
 **/
inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
{
	struct lpfc_hba *phba = eq->phba;
	int i = 0;

	/*
	 * Unlocking an irq is one of the entry point to check
	 * for re-schedule, but we are good for io submission
	 * path as midlayer does a get_cpu to glue us in. Flush
	 * out the invalidate queue so we can see the updated
	 * value for flag.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will not likely get the completion for the caller
		 * during this iteration but i guess that's fine.
		 * Future io's coming on this eq should be able to
		 * pick it up. As for the case of single io's, they
		 * will be handled through a sched from polling timer
		 * function which is currently triggered every 1msec.
		 */
		i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);

	return i;
}
14418
/**
 * lpfc_sli4_add_to_poll_list - Publish an EQ onto the adapter's poll list
 * @eq: the event queue entering software-polling mode
 *
 * If this is the first EQ to go on the list, the slowpath heartbeat timer
 * (lpfc_sli4_poll_hbtimer) is started so the EQ is guaranteed periodic
 * service.  The EQ is then published with list_add_rcu(); the trailing
 * synchronize_rcu() ensures all CPUs observe the updated list before the
 * caller proceeds.
 */
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* kickstart slowpath processing if needed */
	if (list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	list_add_rcu(&eq->_poll_list, &phba->poll_list);
	synchronize_rcu();
}
14431
/**
 * lpfc_sli4_remove_from_poll_list - Retire an EQ from the adapter poll list
 * @eq: the event queue leaving software-polling mode
 *
 * Unlinks the EQ with list_del_rcu() and waits a grace period so no RCU
 * reader (the heartbeat timer walk) can still be referencing it.  When the
 * list drains empty, the heartbeat timer is stopped synchronously.
 */
static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* Disable slowpath processing for this eq. Kick start the eq
	 * by RE-ARMING the eq's ASAP
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		del_timer_sync(&phba->cpuhp_poll_timer);
}
14445
James Smartd480e572019-11-11 15:03:58 -080014446void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
James Smart93a4d6f2019-11-04 16:57:05 -080014447{
14448 struct lpfc_queue *eq, *next;
14449
14450 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14451 list_del(&eq->_poll_list);
14452
14453 INIT_LIST_HEAD(&phba->poll_list);
14454 synchronize_rcu();
14455}
14456
14457static inline void
14458__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14459{
14460 if (mode == eq->mode)
14461 return;
14462 /*
14463 * currently this function is only called during a hotplug
14464 * event and the cpu on which this function is executing
14465 * is going offline. By now the hotplug has instructed
14466 * the scheduler to remove this cpu from cpu active mask.
14467 * So we don't need to work about being put aside by the
14468 * scheduler for a high priority process. Yes, the inte-
14469 * rrupts could come but they are known to retire ASAP.
14470 */
14471
14472 /* Disable polling in the fastpath */
14473 WRITE_ONCE(eq->mode, mode);
14474 /* flush out the store buffer */
14475 smp_wmb();
14476
14477 /*
14478 * Add this eq to the polling list and start polling. For
14479 * a grace period both interrupt handler and poller will
14480 * try to process the eq _but_ that's fine. We have a
14481 * synchronization mechanism in place (queue_claimed) to
14482 * deal with it. This is just a draining phase for int-
14483 * errupt handler (not eq's) as we have guranteed through
14484 * barrier that all the CPUs have seen the new CQ_POLLED
14485 * state. which will effectively disable the REARMING of
14486 * the EQ. The whole idea is eq's die off eventually as
14487 * we are not rearming EQ's anymore.
14488 */
14489 mode ? lpfc_sli4_add_to_poll_list(eq) :
14490 lpfc_sli4_remove_from_poll_list(eq);
14491}
14492
14493void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14494{
14495 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14496}
14497
14498void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14499{
14500 struct lpfc_hba *phba = eq->phba;
14501
14502 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14503
14504 /* Kick start for the pending io's in h/w.
14505 * Once we switch back to interrupt processing on a eq
14506 * the io path completion will only arm eq's when it
14507 * receives a completion. But since eq's are in disa-
14508 * rmed state it doesn't receive a completion. This
14509 * creates a deadlock scenaro.
14510 */
14511 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14512}
14513
/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.  May be NULL (no-op).
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	/* Unlink from any WQ list the queue may still be on. */
	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	/* Release every DMA-coherent page backing the queue entries. */
	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	/* RQs may own posted receive buffers; free those too. */
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
	return;
}
14551
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.  Returns the new queue, or NULL on allocation failure
 * (partially built queues are torn down via lpfc_sli4_queue_free).
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	/* Number of hw pages needed to hold entry_count entries. */
	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, Adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	/* Queue struct plus a trailing array of pgcnt page pointers,
	 * allocated on the NUMA node of the cpu that will use it.
	 */
	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->_poll_list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now. If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];	/* trailing pointer array */
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
14635
14636/**
James Smart962bc512013-01-03 15:44:00 -050014637 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14638 * @phba: HBA structure that indicates port to create a queue on.
14639 * @pci_barset: PCI BAR set flag.
14640 *
14641 * This function shall perform iomap of the specified PCI BAR address to host
14642 * memory address if not already done so and return it. The returned host
14643 * memory address can be NULL.
14644 */
14645static void __iomem *
14646lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14647{
James Smart962bc512013-01-03 15:44:00 -050014648 if (!phba->pcidev)
14649 return NULL;
James Smart962bc512013-01-03 15:44:00 -050014650
14651 switch (pci_barset) {
14652 case WQ_PCI_BAR_0_AND_1:
James Smart962bc512013-01-03 15:44:00 -050014653 return phba->pci_bar0_memmap_p;
14654 case WQ_PCI_BAR_2_AND_3:
James Smart962bc512013-01-03 15:44:00 -050014655 return phba->pci_bar2_memmap_p;
14656 case WQ_PCI_BAR_4_AND_5:
James Smart962bc512013-01-03 15:44:00 -050014657 return phba->pci_bar4_memmap_p;
14658 default:
14659 break;
14660 }
14661 return NULL;
14662}
14663
/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay, in microseconds (clamped to 0xFFFF)
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to HBA. The @startq
 * is used to get the starting EQ index to change. The @numq value is
 * used to specify how many consecutive EQ indexes, starting at EQ index,
 * are to be changed. This function is asynchronous and will wait for any
 * mailbox commands to finish before returning.
 *
 * Failures (mailbox allocation or mailbox command errors) are logged;
 * the routine returns void. Note: on a mailbox failure, some EQs may
 * already have had their delay multiplier changed.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	/* Fill one mailbox entry per EQ to be changed. */
	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* Command status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return;
}
14779
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *	  (not referenced in this routine's body; delay is configured
 *	  separately via lpfc_modify_hba_eq_delay)
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	/* Hand each backing page's physical address to the adapter. */
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* Command status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
14910
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: queue type (stored in cq->type on success)
 * @subtype: queue subtype (stored in cq->subtype on success)
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	/* Version 2 of CQ_CREATE carries the page size and a wider EQ id. */
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.cqav);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	/* 2048/4096 entries need v2 of the command; on older versions they
	 * drop into the default arm, which falls back to the smallest count
	 * when entry_count >= 256 and errors out otherwise.
	 */
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		/* fall through */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	/* Hand each backing page's physical address to the adapter. */
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->assoc_qp = eq;
	cq->host_index = 0;
	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

	if (cq->queue_id > phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_max = cq->queue_id;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
15054
15055/**
James Smart2d7dbc42017-02-12 13:52:35 -080015056 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15057 * @phba: HBA structure that indicates port to create a queue on.
15058 * @cqp: The queue structure array to use to create the completion queues.
James Smartcdb42be2019-01-28 11:14:21 -080015059 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
James Smart2d7dbc42017-02-12 13:52:35 -080015060 *
 * This function creates a set of completion queues to support MRQ
 * as detailed in @cqp, on a port,
15063 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15064 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for these queues. The @hdwq
 * is used to indicate which event queues to bind these completion queues to. This
15069 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15070 * completion queue. This function is asynchronous and will wait for the mailbox
15071 * command to finish before continuing.
15072 *
15073 * On success this function will return a zero. If unable to allocate enough
15074 * memory this function will return -ENOMEM. If the queue create mailbox command
15075 * fails this function will return -ENXIO.
15076 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
		   uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !hdwq || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * The request carries one dma_address per page for every CQ in the
	 * set, so it may exceed the embedded mailbox payload; issue it as a
	 * non-embedded (NEMBED) SLI4 config command.
	 */
	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	/* One pass per CQ: fill in its request fields and its DMA pages. */
	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = hdwq[idx].hba_eq;
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			/*
			 * The first CQ carries the attributes shared by the
			 * whole set (page size/count, CQE size, entry count,
			 * number of CQs) in addition to its own EQ binding.
			 */
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				/* Counts above 1024 need a V2 CQ_CREATE. */
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
						cq->entry_count);
					/*
					 * NOTE(review): cqe_cnt is written
					 * twice here; this second bf_set
					 * overwrites the raw entry_count with
					 * LPFC_CQ_CNT_WORD7 - confirm the
					 * WORD7 encoding is the intended
					 * final value.
					 */
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* fall through */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* fall through - otherwise default to smallest */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		/*
		 * Each subsequent CQ only needs its EQ binding; the request
		 * has a discrete field per index (eq_id1..eq_id15), hence
		 * the explicit case per slot.
		 */
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->assoc_qp = eq;
		cq->host_index = 0;
		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
					 cq->entry_count);
		cq->chann = idx;

		/*
		 * rc counts this CQ's pages so page_idx stays the running
		 * offset into the request's combined page-address array.
		 */
		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	/* 0xFFFF marks an invalid base queue id in the response. */
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Firmware assigns consecutive queue ids starting at the base id. */
	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
15305
15306/**
 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
James Smart04c68492009-05-22 14:52:52 -040015308 * @phba: HBA structure that indicates port to create a queue on.
15309 * @mq: The queue structure to use to create the mailbox queue.
James Smartb19a0612010-04-06 14:48:51 -040015310 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
James Smart04c68492009-05-22 14:52:52 -040015312 *
James Smartb19a0612010-04-06 14:48:51 -040015313 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
15315 * to mq_create_ext otherwise.
James Smart04c68492009-05-22 14:52:52 -040015316 *
James Smartb19a0612010-04-06 14:48:51 -040015317 * This routine cannot fail as all attributes were previously accessed and
15318 * initialized in mq_create_ext.
James Smart04c68492009-05-22 14:52:52 -040015319 **/
James Smartb19a0612010-04-06 14:48:51 -040015320static void
15321lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15322 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
James Smart04c68492009-05-22 14:52:52 -040015323{
15324 struct lpfc_mbx_mq_create *mq_create;
15325 struct lpfc_dmabuf *dmabuf;
James Smartb19a0612010-04-06 14:48:51 -040015326 int length;
James Smart04c68492009-05-22 14:52:52 -040015327
James Smart04c68492009-05-22 14:52:52 -040015328 length = (sizeof(struct lpfc_mbx_mq_create) -
15329 sizeof(struct lpfc_sli4_cfg_mhdr));
15330 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15331 LPFC_MBOX_OPCODE_MQ_CREATE,
15332 length, LPFC_SLI4_MBX_EMBED);
15333 mq_create = &mbox->u.mqe.un.mq_create;
15334 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
James Smartb19a0612010-04-06 14:48:51 -040015335 mq->page_count);
James Smart04c68492009-05-22 14:52:52 -040015336 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
James Smartb19a0612010-04-06 14:48:51 -040015337 cq->queue_id);
James Smart04c68492009-05-22 14:52:52 -040015338 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15339 switch (mq->entry_count) {
James Smart04c68492009-05-22 14:52:52 -040015340 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050015341 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15342 LPFC_MQ_RING_SIZE_16);
James Smart04c68492009-05-22 14:52:52 -040015343 break;
15344 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050015345 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15346 LPFC_MQ_RING_SIZE_32);
James Smart04c68492009-05-22 14:52:52 -040015347 break;
15348 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050015349 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15350 LPFC_MQ_RING_SIZE_64);
James Smart04c68492009-05-22 14:52:52 -040015351 break;
15352 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050015353 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15354 LPFC_MQ_RING_SIZE_128);
James Smart04c68492009-05-22 14:52:52 -040015355 break;
15356 }
15357 list_for_each_entry(dmabuf, &mq->page_list, list) {
15358 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
James Smartb19a0612010-04-06 14:48:51 -040015359 putPaddrLow(dmabuf->phys);
James Smart04c68492009-05-22 14:52:52 -040015360 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smartb19a0612010-04-06 14:48:51 -040015361 putPaddrHigh(dmabuf->phys);
15362 }
15363}
15364
15365/**
15366 * lpfc_mq_create - Create a mailbox Queue on the HBA
15367 * @phba: HBA structure that indicates port to create a queue on.
15368 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
15370 * @subtype: The queue's subtype.
15371 *
15372 * This function creates a mailbox queue, as detailed in @mq, on a port,
15373 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15374 *
15375 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15376 * is used to get the entry count and entry size that are necessary to
15377 * determine the number of pages to allocate and use for this queue. This
15378 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15379 * mailbox queue. This function is asynchronous and will wait for the mailbox
15380 * command to finish before continuing.
15381 *
15382 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040015383 * memory this function will return -ENOMEM. If the queue create mailbox command
15384 * fails this function will return -ENXIO.
James Smartb19a0612010-04-06 14:48:51 -040015385 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/*
	 * Build the extended form (MQ_CREATE_EXT) first; if the firmware
	 * rejects it we fail back to plain MQ_CREATE below.
	 */
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	/* Register for link, FIP, group5, FC and SLI async events. */
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	/* V1 requests carry the CQ id in the request body, V0 in context. */
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	/* Zero each queue page and report its DMA address to the port. */
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		/*
		 * Failback: rebuild the same mailbox as a plain MQ_CREATE
		 * and reissue.  shdr must be re-pointed at the MQ_CREATE
		 * header so the status check below reads the failback
		 * command's response, not the stale EXT one.
		 */
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	/* 0xFFFF marks an invalid queue id in the response. */
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
15518
15519/**
James Smart4f774512009-05-22 14:52:35 -040015520 * lpfc_wq_create - Create a Work Queue on the HBA
15521 * @phba: HBA structure that indicates port to create a queue on.
15522 * @wq: The queue structure to use to create the work queue.
15523 * @cq: The completion queue to bind this work queue to.
15524 * @subtype: The subtype of the work queue indicating its functionality.
15525 *
15526 * This function creates a work queue, as detailed in @wq, on a port, described
15527 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15528 *
15529 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15530 * is used to get the entry count and entry size that are necessary to
15531 * determine the number of pages to allocate and use for this queue. The @cq
15532 * is used to indicate which completion queue to bind this work queue to. This
15533 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15534 * work queue. This function is asynchronous and will wait for the mailbox
15535 * command to finish before continuing.
15536 *
15537 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040015538 * memory this function will return -ENOMEM. If the queue create mailbox command
15539 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040015540 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015541int
James Smart4f774512009-05-22 14:52:35 -040015542lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15543 struct lpfc_queue *cq, uint32_t subtype)
15544{
15545 struct lpfc_mbx_wq_create *wq_create;
15546 struct lpfc_dmabuf *dmabuf;
15547 LPFC_MBOXQ_t *mbox;
15548 int rc, length, status = 0;
15549 uint32_t shdr_status, shdr_add_status;
15550 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040015551 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart5a6f1332011-03-11 16:05:35 -050015552 struct dma_address *page;
James Smart962bc512013-01-03 15:44:00 -050015553 void __iomem *bar_memmap_p;
15554 uint32_t db_offset;
15555 uint16_t pci_barset;
James Smart1351e692018-02-22 08:18:43 -080015556 uint8_t dpp_barset;
15557 uint32_t dpp_offset;
15558 unsigned long pg_addr;
James Smart81b96ed2017-11-20 16:00:29 -080015559 uint8_t wq_create_version;
James Smart49198b32010-04-06 15:04:33 -040015560
James Smart2e90f4b2011-12-13 13:22:37 -050015561 /* sanity check on queue memory */
15562 if (!wq || !cq)
15563 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040015564 if (!phba->sli4_hba.pc_sli4_params.supported)
James Smart81b96ed2017-11-20 16:00:29 -080015565 hw_page_size = wq->page_size;
James Smart4f774512009-05-22 14:52:35 -040015566
15567 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15568 if (!mbox)
15569 return -ENOMEM;
15570 length = (sizeof(struct lpfc_mbx_wq_create) -
15571 sizeof(struct lpfc_sli4_cfg_mhdr));
15572 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15573 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15574 length, LPFC_SLI4_MBX_EMBED);
15575 wq_create = &mbox->u.mqe.un.wq_create;
James Smart5a6f1332011-03-11 16:05:35 -050015576 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040015577 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15578 wq->page_count);
15579 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15580 cq->queue_id);
James Smart0c651872013-07-15 18:33:23 -040015581
15582 /* wqv is the earliest version supported, NOT the latest */
James Smart5a6f1332011-03-11 16:05:35 -050015583 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15584 phba->sli4_hba.pc_sli4_params.wqv);
James Smart962bc512013-01-03 15:44:00 -050015585
James Smartc176ffa2018-01-30 15:58:46 -080015586 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15587 (wq->page_size > SLI4_PAGE_SIZE))
James Smart81b96ed2017-11-20 16:00:29 -080015588 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15589 else
15590 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15591
James Smart0c651872013-07-15 18:33:23 -040015592
James Smart1351e692018-02-22 08:18:43 -080015593 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15594 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15595 else
15596 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15597
15598 switch (wq_create_version) {
James Smart0c651872013-07-15 18:33:23 -040015599 case LPFC_Q_CREATE_VERSION_1:
James Smart5a6f1332011-03-11 16:05:35 -050015600 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15601 wq->entry_count);
James Smart3f247de2017-04-21 16:04:56 -070015602 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15603 LPFC_Q_CREATE_VERSION_1);
15604
James Smart5a6f1332011-03-11 16:05:35 -050015605 switch (wq->entry_size) {
15606 default:
15607 case 64:
15608 bf_set(lpfc_mbx_wq_create_wqe_size,
15609 &wq_create->u.request_1,
15610 LPFC_WQ_WQE_SIZE_64);
15611 break;
15612 case 128:
15613 bf_set(lpfc_mbx_wq_create_wqe_size,
15614 &wq_create->u.request_1,
15615 LPFC_WQ_WQE_SIZE_128);
15616 break;
15617 }
James Smart1351e692018-02-22 08:18:43 -080015618 /* Request DPP by default */
15619 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
James Smart8ea73db2017-02-12 13:52:25 -080015620 bf_set(lpfc_mbx_wq_create_page_size,
15621 &wq_create->u.request_1,
James Smart81b96ed2017-11-20 16:00:29 -080015622 (wq->page_size / SLI4_PAGE_SIZE));
James Smart5a6f1332011-03-11 16:05:35 -050015623 page = wq_create->u.request_1.page;
James Smart0c651872013-07-15 18:33:23 -040015624 break;
15625 default:
James Smart1351e692018-02-22 08:18:43 -080015626 page = wq_create->u.request.page;
15627 break;
James Smart5a6f1332011-03-11 16:05:35 -050015628 }
James Smart0c651872013-07-15 18:33:23 -040015629
James Smart4f774512009-05-22 14:52:35 -040015630 list_for_each_entry(dmabuf, &wq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040015631 memset(dmabuf->virt, 0, hw_page_size);
James Smart5a6f1332011-03-11 16:05:35 -050015632 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15633 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
James Smart4f774512009-05-22 14:52:35 -040015634 }
James Smart962bc512013-01-03 15:44:00 -050015635
15636 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15637 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15638
James Smart4f774512009-05-22 14:52:35 -040015639 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15640 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040015641 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15642 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15643 if (shdr_status || shdr_add_status || rc) {
15644 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15645 "2503 WQ_CREATE mailbox failed with "
15646 "status x%x add_status x%x, mbx status x%x\n",
15647 shdr_status, shdr_add_status, rc);
15648 status = -ENXIO;
15649 goto out;
15650 }
James Smart1351e692018-02-22 08:18:43 -080015651
15652 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15653 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15654 &wq_create->u.response);
15655 else
15656 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15657 &wq_create->u.response_1);
15658
James Smart4f774512009-05-22 14:52:35 -040015659 if (wq->queue_id == 0xFFFF) {
15660 status = -ENXIO;
15661 goto out;
15662 }
James Smart1351e692018-02-22 08:18:43 -080015663
15664 wq->db_format = LPFC_DB_LIST_FORMAT;
15665 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15666 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15667 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15668 &wq_create->u.response);
15669 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15670 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15672 "3265 WQ[%d] doorbell format "
15673 "not supported: x%x\n",
15674 wq->queue_id, wq->db_format);
15675 status = -EINVAL;
15676 goto out;
15677 }
15678 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15679 &wq_create->u.response);
15680 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15681 pci_barset);
15682 if (!bar_memmap_p) {
15683 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15684 "3263 WQ[%d] failed to memmap "
15685 "pci barset:x%x\n",
15686 wq->queue_id, pci_barset);
15687 status = -ENOMEM;
15688 goto out;
15689 }
15690 db_offset = wq_create->u.response.doorbell_offset;
15691 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15692 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15693 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15694 "3252 WQ[%d] doorbell offset "
15695 "not supported: x%x\n",
15696 wq->queue_id, db_offset);
15697 status = -EINVAL;
15698 goto out;
15699 }
15700 wq->db_regaddr = bar_memmap_p + db_offset;
15701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15702 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15703 "format:x%x\n", wq->queue_id,
15704 pci_barset, db_offset, wq->db_format);
15705 } else
15706 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
James Smart962bc512013-01-03 15:44:00 -050015707 } else {
James Smart1351e692018-02-22 08:18:43 -080015708 /* Check if DPP was honored by the firmware */
15709 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15710 &wq_create->u.response_1);
15711 if (wq->dpp_enable) {
15712 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15713 &wq_create->u.response_1);
15714 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15715 pci_barset);
15716 if (!bar_memmap_p) {
15717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15718 "3267 WQ[%d] failed to memmap "
15719 "pci barset:x%x\n",
15720 wq->queue_id, pci_barset);
15721 status = -ENOMEM;
15722 goto out;
15723 }
15724 db_offset = wq_create->u.response_1.doorbell_offset;
15725 wq->db_regaddr = bar_memmap_p + db_offset;
15726 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15727 &wq_create->u.response_1);
15728 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15729 &wq_create->u.response_1);
15730 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15731 dpp_barset);
15732 if (!bar_memmap_p) {
15733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15734 "3268 WQ[%d] failed to memmap "
15735 "pci barset:x%x\n",
15736 wq->queue_id, dpp_barset);
15737 status = -ENOMEM;
15738 goto out;
15739 }
15740 dpp_offset = wq_create->u.response_1.dpp_offset;
15741 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15742 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15743 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15744 "dpp_id:x%x dpp_barset:x%x "
15745 "dpp_offset:x%x\n",
15746 wq->queue_id, pci_barset, db_offset,
15747 wq->dpp_id, dpp_barset, dpp_offset);
15748
15749 /* Enable combined writes for DPP aperture */
15750 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15751#ifdef CONFIG_X86
15752 rc = set_memory_wc(pg_addr, 1);
15753 if (rc) {
15754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15755 "3272 Cannot setup Combined "
15756 "Write on WQ[%d] - disable DPP\n",
15757 wq->queue_id);
15758 phba->cfg_enable_dpp = 0;
15759 }
15760#else
15761 phba->cfg_enable_dpp = 0;
15762#endif
15763 } else
15764 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
James Smart962bc512013-01-03 15:44:00 -050015765 }
James Smart895427b2017-02-12 13:52:30 -080015766 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15767 if (wq->pring == NULL) {
15768 status = -ENOMEM;
15769 goto out;
15770 }
James Smart4f774512009-05-22 14:52:35 -040015771 wq->type = LPFC_WQ;
James Smart2a622bf2011-02-16 12:40:06 -050015772 wq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040015773 wq->subtype = subtype;
15774 wq->host_index = 0;
15775 wq->hba_index = 0;
James Smart32517fc2019-01-28 11:14:33 -080015776 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
James Smart4f774512009-05-22 14:52:35 -040015777
15778 /* link the wq onto the parent cq child list */
15779 list_add_tail(&wq->list, &cq->child_list);
15780out:
James Smart8fa38512009-07-19 10:01:03 -040015781 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015782 return status;
15783}
15784
/**
 * lpfc_rq_create - Create a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: Functional subtype recorded on both queues (e.g. LPFC_NVMET);
 *           also selects the data-buffer size for the data RQ.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * struct is used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pair. The mailbox is issued in polled mode, so the call blocks
 * until the command completes.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	/* Header and data RQs operate in lock step; their depths must match */
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		/* V1 request: entry count and buffer size are explicit fields */
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		/* V0 request: only discrete power-of-two ring sizes exist */
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	/* Hand the header RQ's DMA pages to the port */
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		/* 0xFFFF is the port's "no queue id" sentinel */
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		/* Dual-chute mode: doorbell lives in a response-selected BAR */
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		/* Non-DUA: use the function's fixed RQ doorbell register */
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* now create the data queue (reuses the same mailbox buffer) */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		/* NVMET data RQs carry larger receive buffers */
		if (subtype == LPFC_NVMET)
			rq_create->u.request.context.buffer_size =
				LPFC_NVMET_DATA_BUF_SIZE;
		else
			rq_create->u.request.context.buffer_size =
				LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		if (subtype == LPFC_NVMET)
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
		else
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
16073
/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: Functional subtype recorded on every created queue.
 *
 * This function creates @phba->cfg_nvmet_mrq receive buffer queue pairs, as
 * detailed in @hrqp and @drqp, on a port, described by @phba by sending a
 * single non-embedded version-2 RQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drqp and @hrqp
 * arrays are used to get the entry counts that are necessary to determine the
 * number of pages to use for these queues. The @cqp array is used to indicate
 * which completion queues to bind received buffers that are posted to these
 * queues to. The mailbox is issued in polled mode, so the call blocks until
 * the command completes.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* One request describes all queues: header + data RQ per pair, each
	 * contributing page_count page addresses (all pairs sized alike).
	 */
	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	/* Too large to embed in the MQE; use a non-embedded SGE payload */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}



	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		/* Shared context fields are written once, from pair 0 */
		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		/* Append this header RQ's pages; rc counts pages added */
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		/* Then this data RQ's pages, continuing the page array */
		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		/* 0xFFFF is the port's "no queue id" sentinel */
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id; the port assigns
	 * consecutive ids starting at the returned base: hdr, data, hdr, ...
	 */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
16266
16267/**
James Smart4f774512009-05-22 14:52:35 -040016268 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16269 * @eq: The queue structure associated with the queue to destroy.
16270 *
16271 * This function destroys a queue, as detailed in @eq by sending an mailbox
16272 * command, specific to the type of queue, to the HBA.
16273 *
16274 * The @eq struct is used to get the queue ID of the queue to destroy.
16275 *
16276 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040016277 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040016278 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040016279int
James Smart4f774512009-05-22 14:52:35 -040016280lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16281{
16282 LPFC_MBOXQ_t *mbox;
16283 int rc, length, status = 0;
16284 uint32_t shdr_status, shdr_add_status;
16285 union lpfc_sli4_cfg_shdr *shdr;
16286
James Smart2e90f4b2011-12-13 13:22:37 -050016287 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040016288 if (!eq)
16289 return -ENODEV;
James Smart32517fc2019-01-28 11:14:33 -080016290
James Smart4f774512009-05-22 14:52:35 -040016291 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16292 if (!mbox)
16293 return -ENOMEM;
16294 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16295 sizeof(struct lpfc_sli4_cfg_mhdr));
16296 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16297 LPFC_MBOX_OPCODE_EQ_DESTROY,
16298 length, LPFC_SLI4_MBX_EMBED);
16299 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16300 eq->queue_id);
16301 mbox->vport = eq->phba->pport;
16302 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16303
16304 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16305 /* The IOCTL status is embedded in the mailbox subheader. */
16306 shdr = (union lpfc_sli4_cfg_shdr *)
16307 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16308 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16309 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16310 if (shdr_status || shdr_add_status || rc) {
16311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16312 "2505 EQ_DESTROY mailbox failed with "
16313 "status x%x add_status x%x, mbx status x%x\n",
16314 shdr_status, shdr_add_status, rc);
16315 status = -ENXIO;
16316 }
16317
16318 /* Remove eq from any list */
16319 list_del_init(&eq->list);
James Smart8fa38512009-07-19 10:01:03 -040016320 mempool_free(mbox, eq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040016321 return status;
16322}
16323
16324/**
16325 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16326 * @cq: The queue structure associated with the queue to destroy.
16327 *
16328 * This function destroys a queue, as detailed in @cq by sending an mailbox
16329 * command, specific to the type of queue, to the HBA.
16330 *
16331 * The @cq struct is used to get the queue ID of the queue to destroy.
16332 *
16333 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040016334 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040016335 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040016336int
James Smart4f774512009-05-22 14:52:35 -040016337lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16338{
16339 LPFC_MBOXQ_t *mbox;
16340 int rc, length, status = 0;
16341 uint32_t shdr_status, shdr_add_status;
16342 union lpfc_sli4_cfg_shdr *shdr;
16343
James Smart2e90f4b2011-12-13 13:22:37 -050016344 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040016345 if (!cq)
16346 return -ENODEV;
16347 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16348 if (!mbox)
16349 return -ENOMEM;
16350 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16351 sizeof(struct lpfc_sli4_cfg_mhdr));
16352 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16353 LPFC_MBOX_OPCODE_CQ_DESTROY,
16354 length, LPFC_SLI4_MBX_EMBED);
16355 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16356 cq->queue_id);
16357 mbox->vport = cq->phba->pport;
16358 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16359 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16360 /* The IOCTL status is embedded in the mailbox subheader. */
16361 shdr = (union lpfc_sli4_cfg_shdr *)
16362 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16363 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16364 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16365 if (shdr_status || shdr_add_status || rc) {
16366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16367 "2506 CQ_DESTROY mailbox failed with "
16368 "status x%x add_status x%x, mbx status x%x\n",
16369 shdr_status, shdr_add_status, rc);
16370 status = -ENXIO;
16371 }
16372 /* Remove cq from any list */
16373 list_del_init(&cq->list);
James Smart8fa38512009-07-19 10:01:03 -040016374 mempool_free(mbox, cq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040016375 return status;
16376}
16377
16378/**
James Smart04c68492009-05-22 14:52:52 -040016379 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16380 * @qm: The queue structure associated with the queue to destroy.
16381 *
16382 * This function destroys a queue, as detailed in @mq by sending an mailbox
16383 * command, specific to the type of queue, to the HBA.
16384 *
16385 * The @mq struct is used to get the queue ID of the queue to destroy.
16386 *
16387 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040016388 * command fails this function will return -ENXIO.
James Smart04c68492009-05-22 14:52:52 -040016389 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040016390int
James Smart04c68492009-05-22 14:52:52 -040016391lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16392{
16393 LPFC_MBOXQ_t *mbox;
16394 int rc, length, status = 0;
16395 uint32_t shdr_status, shdr_add_status;
16396 union lpfc_sli4_cfg_shdr *shdr;
16397
James Smart2e90f4b2011-12-13 13:22:37 -050016398 /* sanity check on queue memory */
James Smart04c68492009-05-22 14:52:52 -040016399 if (!mq)
16400 return -ENODEV;
16401 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16402 if (!mbox)
16403 return -ENOMEM;
16404 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16405 sizeof(struct lpfc_sli4_cfg_mhdr));
16406 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16407 LPFC_MBOX_OPCODE_MQ_DESTROY,
16408 length, LPFC_SLI4_MBX_EMBED);
16409 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16410 mq->queue_id);
16411 mbox->vport = mq->phba->pport;
16412 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16413 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16414 /* The IOCTL status is embedded in the mailbox subheader. */
16415 shdr = (union lpfc_sli4_cfg_shdr *)
16416 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16417 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16418 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16419 if (shdr_status || shdr_add_status || rc) {
16420 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16421 "2507 MQ_DESTROY mailbox failed with "
16422 "status x%x add_status x%x, mbx status x%x\n",
16423 shdr_status, shdr_add_status, rc);
16424 status = -ENXIO;
16425 }
16426 /* Remove mq from any list */
16427 list_del_init(&mq->list);
James Smart8fa38512009-07-19 10:01:03 -040016428 mempool_free(mbox, mq->phba->mbox_mem_pool);
James Smart04c68492009-05-22 14:52:52 -040016429 return status;
16430}
16431
16432/**
James Smart4f774512009-05-22 14:52:35 -040016433 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16434 * @wq: The queue structure associated with the queue to destroy.
16435 *
16436 * This function destroys a queue, as detailed in @wq by sending an mailbox
16437 * command, specific to the type of queue, to the HBA.
16438 *
16439 * The @wq struct is used to get the queue ID of the queue to destroy.
16440 *
16441 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040016442 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040016443 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040016444int
James Smart4f774512009-05-22 14:52:35 -040016445lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16446{
16447 LPFC_MBOXQ_t *mbox;
16448 int rc, length, status = 0;
16449 uint32_t shdr_status, shdr_add_status;
16450 union lpfc_sli4_cfg_shdr *shdr;
16451
James Smart2e90f4b2011-12-13 13:22:37 -050016452 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040016453 if (!wq)
16454 return -ENODEV;
16455 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16456 if (!mbox)
16457 return -ENOMEM;
16458 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16459 sizeof(struct lpfc_sli4_cfg_mhdr));
16460 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16461 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16462 length, LPFC_SLI4_MBX_EMBED);
16463 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16464 wq->queue_id);
16465 mbox->vport = wq->phba->pport;
16466 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16467 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16468 shdr = (union lpfc_sli4_cfg_shdr *)
16469 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16470 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16471 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16472 if (shdr_status || shdr_add_status || rc) {
16473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16474 "2508 WQ_DESTROY mailbox failed with "
16475 "status x%x add_status x%x, mbx status x%x\n",
16476 shdr_status, shdr_add_status, rc);
16477 status = -ENXIO;
16478 }
16479 /* Remove wq from any list */
16480 list_del_init(&wq->list);
James Smartd1f525a2017-04-21 16:04:55 -070016481 kfree(wq->pring);
16482 wq->pring = NULL;
James Smart8fa38512009-07-19 10:01:03 -040016483 mempool_free(mbox, wq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040016484 return status;
16485}
16486
/**
 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port the queues belong to.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the receive queue pair described by @hrq and @drq
 * by issuing two RQ_DESTROY mailbox commands (polled, reusing one mailbox),
 * one per queue, then unlinks both queues from their parent lists.
 *
 * On success this function will return a zero. If either queue destroy
 * mailbox command fails this function will return -ENXIO; -ENODEV if a
 * queue pointer is NULL; -ENOMEM if no mailbox memory is available.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	/* First destroy the header RQ */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* On timeout the mailbox may still be owned by the port, so
		 * it is deliberately not freed here.
		 * NOTE(review): the final mempool_free below has no such
		 * MBX_TIMEOUT guard for the second command — confirm whether
		 * that asymmetry is intended.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	/* Reuse the same request to destroy the data RQ */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Unlink both queues from their parent CQ child lists */
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
16557
16558/**
16559 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16560 * @phba: The virtual port for which this call being executed.
16561 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16562 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16563 * @xritag: the xritag that ties this io to the SGL pages.
16564 *
16565 * This routine will post the sgl pages for the IO that has the xritag
16566 * that is in the iocbq structure. The xritag is assigned during iocbq
16567 * creation and persists for as long as the driver is loaded.
16568 * if the caller has fewer than 256 scatter gather segments to map then
16569 * pdma_phys_addr1 should be 0.
16570 * If the caller needs to map more than 256 scatter gather segment then
16571 * pdma_phys_addr1 should be a valid physical address.
16572 * physical address for SGLs must be 64 byte aligned.
16573 * If you are going to map 2 SGL's then the first one must have 256 entries
16574 * the second sgl can have between 1 and 256 entries.
16575 *
16576 * Return codes:
16577 * 0 - Success
16578 * -ENXIO, -ENOMEM - Failure
16579 **/
16580int
16581lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16582 dma_addr_t pdma_phys_addr0,
16583 dma_addr_t pdma_phys_addr1,
16584 uint16_t xritag)
16585{
16586 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16587 LPFC_MBOXQ_t *mbox;
16588 int rc;
16589 uint32_t shdr_status, shdr_add_status;
James Smart6d368e52011-05-24 11:44:12 -040016590 uint32_t mbox_tmo;
James Smart4f774512009-05-22 14:52:35 -040016591 union lpfc_sli4_cfg_shdr *shdr;
16592
16593 if (xritag == NO_XRI) {
16594 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16595 "0364 Invalid param:\n");
16596 return -EINVAL;
16597 }
16598
16599 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16600 if (!mbox)
16601 return -ENOMEM;
16602
16603 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16604 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16605 sizeof(struct lpfc_mbx_post_sgl_pages) -
James Smartfedd3b72011-02-16 12:39:24 -050016606 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
James Smart4f774512009-05-22 14:52:35 -040016607
16608 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16609 &mbox->u.mqe.un.post_sgl_pages;
16610 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16611 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16612
16613 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16614 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16615 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16616 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16617
16618 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16619 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16620 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16621 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16622 if (!phba->sli4_hba.intr_enable)
16623 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smart6d368e52011-05-24 11:44:12 -040016624 else {
James Smarta183a152011-10-10 21:32:43 -040016625 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -040016626 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16627 }
James Smart4f774512009-05-22 14:52:35 -040016628 /* The IOCTL status is embedded in the mailbox subheader. */
16629 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16630 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16631 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16632 if (rc != MBX_TIMEOUT)
16633 mempool_free(mbox, phba->mbox_mem_pool);
16634 if (shdr_status || shdr_add_status || rc) {
16635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16636 "2511 POST_SGL mailbox failed with "
16637 "status x%x add_status x%x, mbx status x%x\n",
16638 shdr_status, shdr_add_status, rc);
James Smart4f774512009-05-22 14:52:35 -040016639 }
16640 return 0;
16641}
James Smart4f774512009-05-22 14:52:35 -040016642
16643/**
James Smart88a2cfb2011-07-22 18:36:33 -040016644 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
James Smart6d368e52011-05-24 11:44:12 -040016645 * @phba: pointer to lpfc hba data structure.
16646 *
16647 * This routine is invoked to post rpi header templates to the
James Smart88a2cfb2011-07-22 18:36:33 -040016648 * HBA consistent with the SLI-4 interface spec. This routine
16649 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
16650 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
James Smart6d368e52011-05-24 11:44:12 -040016651 *
James Smart88a2cfb2011-07-22 18:36:33 -040016652 * Returns
16653 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
16654 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
16655 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040016656static uint16_t
James Smart6d368e52011-05-24 11:44:12 -040016657lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16658{
16659 unsigned long xri;
16660
16661 /*
16662 * Fetch the next logical xri. Because this index is logical,
16663 * the driver starts at 0 each time.
16664 */
16665 spin_lock_irq(&phba->hbalock);
16666 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16667 phba->sli4_hba.max_cfg_param.max_xri, 0);
16668 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16669 spin_unlock_irq(&phba->hbalock);
16670 return NO_XRI;
16671 } else {
16672 set_bit(xri, phba->sli4_hba.xri_bmask);
16673 phba->sli4_hba.max_cfg_param.xri_used++;
James Smart6d368e52011-05-24 11:44:12 -040016674 }
James Smart6d368e52011-05-24 11:44:12 -040016675 spin_unlock_irq(&phba->hbalock);
16676 return xri;
16677}
16678
16679/**
16680 * lpfc_sli4_free_xri - Release an xri for reuse.
16681 * @phba: pointer to lpfc hba data structure.
16682 *
16683 * This routine is invoked to release an xri to the pool of
16684 * available rpis maintained by the driver.
16685 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040016686static void
James Smart6d368e52011-05-24 11:44:12 -040016687__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16688{
16689 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
James Smart6d368e52011-05-24 11:44:12 -040016690 phba->sli4_hba.max_cfg_param.xri_used--;
16691 }
16692}
16693
/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release back into the pool.
 *
 * Locked wrapper around __lpfc_sli4_free_xri(): takes phba->hbalock and
 * returns @xri to the pool of available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
16708
16709/**
James Smart4f774512009-05-22 14:52:35 -040016710 * lpfc_sli4_next_xritag - Get an xritag for the io
16711 * @phba: Pointer to HBA context object.
16712 *
16713 * This function gets an xritag for the iocb. If there is no unused xritag
16714 * it will return 0xffff.
16715 * The function returns the allocated xritag if successful, else returns zero.
16716 * Zero is not a valid xritag.
16717 * The caller is not required to hold any lock.
16718 **/
16719uint16_t
16720lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16721{
James Smart6d368e52011-05-24 11:44:12 -040016722 uint16_t xri_index;
James Smart4f774512009-05-22 14:52:35 -040016723
James Smart6d368e52011-05-24 11:44:12 -040016724 xri_index = lpfc_sli4_alloc_xri(phba);
James Smart81378052012-05-09 21:17:37 -040016725 if (xri_index == NO_XRI)
16726 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16727 "2004 Failed to allocate XRI.last XRITAG is %d"
16728 " Max XRI is %d, Used XRI is %d\n",
16729 xri_index,
16730 phba->sli4_hba.max_cfg_param.max_xri,
16731 phba->sli4_hba.max_cfg_param.xri_used);
16732 return xri_index;
James Smart4f774512009-05-22 14:52:35 -040016733}
16734
/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 *
 * Returns 0 on success, -ENOMEM on allocation/size failure, -ENXIO when
 * the mailbox command is rejected by the port.
 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The whole request must fit in one page of mailbox payload. */
	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	/* Fill one page pair per sgl entry; second page is unused (0). */
	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The command status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the mailbox is left for the completion handler. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
16838
/**
 * lpfc_sli4_post_io_sgl_block - post a block of io buffer sgls to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to the io (nvme/scsi) buffer list.
 * @count: number of buffers on the list.
 *
 * This routine is invoked to post a block of @count io buffer sgl pages
 * from the buffer list @nblist to the HBA using a non-embedded mailbox
 * command.  No lock is held.
 *
 * Returns 0 on success, -ENOMEM on allocation/size failure, -ENXIO when
 * the mailbox command is rejected by the port.
 **/
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		/* Second sgl page is only needed for large buffers. */
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
					 SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The command status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the mailbox is left for the completion handler. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
16951
/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of buffers on @post_nblist.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contains contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_nblist passed in
 * must be local list, thus no lock is needed when manipulate the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 * (also returns -EINVAL when @sb_count is not positive).
 **/
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);		/* buffers staged for the next block */
	LIST_HEAD(blck_nblist);		/* block currently being posted */
	LIST_HEAD(nvme_nblist);		/* buffers processed (posted or not) */

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri;
				 * posted individually via embedded mailbox
				 */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
					phba, lpfc_ncmd->dma_phys_sgl,
					pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* Post error. Buffer unavailable. */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* Post success. Buffer available. */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* Post error. Mark buffer unavailable. */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* Post success, Mark buffer available. */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}
17079
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

/* R_CTL value for MDS diagnostic frames, not defined in the fc headers */
#define FC_RCTL_MDS_DIAGS	0xF4

	/* First gate on R_CTL: accepted values fall through the switch. */
	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:	/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:	/* basic link service NOP */
	case FC_RCTL_BA_ABTS:	/* basic link service abort */
	case FC_RCTL_BA_RMC:	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		/* Skip past the VFT header and re-check the inner frame. */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

	/* Then gate on the FC-4 TYPE field. */
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
17165
17166/**
17167 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17168 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17169 *
17170 * This function processes the FC header to retrieve the VFI from the VF
17171 * header, if one exists. This function will return the VFI if one exists
17172 * or 0 if no VSAN Header exists.
17173 **/
17174static uint32_t
17175lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17176{
17177 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17178
17179 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17180 return 0;
17181 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17182}
17183
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID of the frame
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	/* Frames addressed to the fabric controller go to the physical port */
	if (did == Fabric_DID)
		return phba->pport;
	/* In pt2pt mode before the link is fully up, route everything to the
	 * physical port as well.
	 */
	if ((phba->pport->fc_flag & FC_PT2PT) &&
		!(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	/* Otherwise match on FCFI, VFI (from the optional VFT header) and DID
	 * against each active vport.
	 */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}
17224
17225/**
James Smart45ed1192009-10-02 15:17:02 -040017226 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17227 * @vport: The vport to work on.
17228 *
17229 * This function updates the receive sequence time stamp for this vport. The
17230 * receive sequence time stamp indicates the time that the last frame of the
17231 * the sequence that has been idle for the longest amount of time was received.
17232 * the driver uses this time stamp to indicate if any received sequences have
17233 * timed out.
17234 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017235static void
James Smart45ed1192009-10-02 15:17:02 -040017236lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17237{
17238 struct lpfc_dmabuf *h_buf;
17239 struct hbq_dmabuf *dmabuf = NULL;
17240
17241 /* get the oldest sequence on the rcv list */
17242 h_buf = list_get_first(&vport->rcv_buffer_list,
17243 struct lpfc_dmabuf, list);
17244 if (!h_buf)
17245 return;
17246 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17247 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17248}
17249
17250/**
17251 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17252 * @vport: The vport that the received sequences were sent to.
17253 *
17254 * This function cleans up all outstanding received sequences. This is called
17255 * by the driver when a link event or user action invalidates all the received
17256 * sequences.
17257 **/
17258void
17259lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17260{
17261 struct lpfc_dmabuf *h_buf, *hnext;
17262 struct lpfc_dmabuf *d_buf, *dnext;
17263 struct hbq_dmabuf *dmabuf = NULL;
17264
17265 /* start with the oldest sequence on the rcv list */
17266 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17267 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17268 list_del_init(&dmabuf->hbuf.list);
17269 list_for_each_entry_safe(d_buf, dnext,
17270 &dmabuf->dbuf.list, list) {
17271 list_del_init(&d_buf->list);
17272 lpfc_in_buf_free(vport->phba, d_buf);
17273 }
17274 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17275 }
17276}
17277
/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	/* Cheap first check: the oldest sequence's stamp (cached on the
	 * vport) plus E_D_TOV; if that has not expired, nothing else can
	 * have either.
	 */
	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		/* The list is kept oldest-first, so the first unexpired
		 * sequence ends the scan.
		 */
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		/* Free all data frames of the timed-out sequence, then the
		 * sequence buffer itself; no ABTS is sent.
		 */
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	/* Re-derive the cached oldest-sequence stamp after removals. */
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
17323
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which this frame was received
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
 * This function returns a pointer to the first dmabuf in the sequence list that
 * the frame was linked to, or NULL if a duplicate sequence count was seen.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to:
	 * match on SEQ_ID, OX_ID and the 3-byte S_ID.
	 */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	/* If this frame precedes the current sequence head, it becomes the
	 * new head: swap it in on the rcv list and chain the old head
	 * behind it.
	 */
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame:
	 * scan backwards from the highest sequence count.
	 */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		/* Reached the list head without finding a smaller count:
		 * stop; equal counts (duplicates) fall out with found == 0.
		 */
		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
17414
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort from the partially assembled sequence,
 * described by the information from the basic abort @dmabuf. It checks to see
 * whether such a partially assembled sequence is held by the driver. If so, it
 * shall free up all the frames from the partially assembled sequence.
 *
 * Return
 * true -- if there is matching partially assembled sequence present and all
 *         the frames freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame:
	 * same SEQ_ID/OX_ID/S_ID match as lpfc_fc_frame_add.
	 */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
17466
17467/**
James Smart6dd9e312013-01-03 15:43:37 -050017468 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17469 * @vport: pointer to a vitural port
17470 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17471 *
17472 * This function tries to abort from the assembed sequence from upper level
17473 * protocol, described by the information from basic abbort @dmabuf. It
17474 * checks to see whether such pending context exists at upper level protocol.
17475 * If so, it shall clean up the pending context.
17476 *
17477 * Return
17478 * true -- if there is matching pending context of the sequence cleaned
17479 * at ulp;
17480 * false -- if there is no matching pending context of the sequence present
17481 * at ulp.
17482 **/
17483static bool
17484lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17485{
17486 struct lpfc_hba *phba = vport->phba;
17487 int handled;
17488
17489 /* Accepting abort at ulp with SLI4 only */
17490 if (phba->sli_rev < LPFC_SLI_REV4)
17491 return false;
17492
17493 /* Register all caring upper level protocols to attend abort */
17494 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17495 if (handled)
17496 return true;
17497
17498 return false;
17499}
17500
17501/**
James Smart546fc852011-03-11 16:06:29 -050017502 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
James Smart6669f9b2009-10-02 15:16:45 -040017503 * @phba: Pointer to HBA context object.
17504 * @cmd_iocbq: pointer to the command iocbq structure.
17505 * @rsp_iocbq: pointer to the response iocbq structure.
17506 *
James Smart546fc852011-03-11 16:06:29 -050017507 * This function handles the sequence abort response iocb command complete
James Smart6669f9b2009-10-02 15:16:45 -040017508 * event. It properly releases the memory allocated to the sequence abort
17509 * accept iocb.
17510 **/
17511static void
James Smart546fc852011-03-11 16:06:29 -050017512lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
James Smart6669f9b2009-10-02 15:16:45 -040017513 struct lpfc_iocbq *cmd_iocbq,
17514 struct lpfc_iocbq *rsp_iocbq)
17515{
James Smart6dd9e312013-01-03 15:43:37 -050017516 struct lpfc_nodelist *ndlp;
17517
17518 if (cmd_iocbq) {
17519 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17520 lpfc_nlp_put(ndlp);
17521 lpfc_nlp_not_used(ndlp);
James Smart6669f9b2009-10-02 15:16:45 -040017522 lpfc_sli_release_iocbq(phba, cmd_iocbq);
James Smart6dd9e312013-01-03 15:43:37 -050017523 }
James Smart6b5151f2012-01-18 16:24:06 -050017524
17525 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17526 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17527 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17528 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17529 rsp_iocbq->iocb.ulpStatus,
17530 rsp_iocbq->iocb.un.ulpWord[4]);
James Smart6669f9b2009-10-02 15:16:45 -040017531}
17532
17533/**
James Smart6d368e52011-05-24 11:44:12 -040017534 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17535 * @phba: Pointer to HBA context object.
17536 * @xri: xri id in transaction.
17537 *
17538 * This function validates the xri maps to the known range of XRIs allocated an
17539 * used by the driver.
17540 **/
James Smart7851fe22011-07-22 18:36:52 -040017541uint16_t
James Smart6d368e52011-05-24 11:44:12 -040017542lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17543 uint16_t xri)
17544{
James Smarta2fc4aef2014-09-03 12:57:55 -040017545 uint16_t i;
James Smart6d368e52011-05-24 11:44:12 -040017546
17547 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17548 if (xri == phba->sli4_hba.xri_ids[i])
17549 return i;
17550 }
17551 return NO_XRI;
17552}
17553
James Smart6d368e52011-05-24 11:44:12 -040017554/**
James Smart546fc852011-03-11 16:06:29 -050017555 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040017556 * @phba: Pointer to HBA context object.
17557 * @fc_hdr: pointer to a FC frame header.
17558 *
James Smart546fc852011-03-11 16:06:29 -050017559 * This function sends a basic response to a previous unsol sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040017560 * event after aborting the sequence handling.
17561 **/
James Smart86c67372017-04-21 16:05:04 -070017562void
James Smart6dd9e312013-01-03 15:43:37 -050017563lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17564 struct fc_frame_header *fc_hdr, bool aborted)
James Smart6669f9b2009-10-02 15:16:45 -040017565{
James Smart6dd9e312013-01-03 15:43:37 -050017566 struct lpfc_hba *phba = vport->phba;
James Smart6669f9b2009-10-02 15:16:45 -040017567 struct lpfc_iocbq *ctiocb = NULL;
17568 struct lpfc_nodelist *ndlp;
James Smartee0f4fe2012-05-09 21:19:14 -040017569 uint16_t oxid, rxid, xri, lxri;
James Smart5ffc2662009-11-18 15:39:44 -050017570 uint32_t sid, fctl;
James Smart6669f9b2009-10-02 15:16:45 -040017571 IOCB_t *icmd;
James Smart546fc852011-03-11 16:06:29 -050017572 int rc;
James Smart6669f9b2009-10-02 15:16:45 -040017573
17574 if (!lpfc_is_link_up(phba))
17575 return;
17576
17577 sid = sli4_sid_from_fc_hdr(fc_hdr);
17578 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
James Smart5ffc2662009-11-18 15:39:44 -050017579 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
James Smart6669f9b2009-10-02 15:16:45 -040017580
James Smart6dd9e312013-01-03 15:43:37 -050017581 ndlp = lpfc_findnode_did(vport, sid);
James Smart6669f9b2009-10-02 15:16:45 -040017582 if (!ndlp) {
James Smart9d3d3402017-04-21 16:05:00 -070017583 ndlp = lpfc_nlp_init(vport, sid);
James Smart6dd9e312013-01-03 15:43:37 -050017584 if (!ndlp) {
17585 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17586 "1268 Failed to allocate ndlp for "
17587 "oxid:x%x SID:x%x\n", oxid, sid);
17588 return;
17589 }
James Smart6dd9e312013-01-03 15:43:37 -050017590 /* Put ndlp onto pport node list */
17591 lpfc_enqueue_node(vport, ndlp);
17592 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17593 /* re-setup ndlp without removing from node list */
17594 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17595 if (!ndlp) {
17596 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17597 "3275 Failed to active ndlp found "
17598 "for oxid:x%x SID:x%x\n", oxid, sid);
17599 return;
17600 }
James Smart6669f9b2009-10-02 15:16:45 -040017601 }
17602
James Smart546fc852011-03-11 16:06:29 -050017603 /* Allocate buffer for rsp iocb */
James Smart6669f9b2009-10-02 15:16:45 -040017604 ctiocb = lpfc_sli_get_iocbq(phba);
17605 if (!ctiocb)
17606 return;
17607
James Smart5ffc2662009-11-18 15:39:44 -050017608 /* Extract the F_CTL field from FC_HDR */
17609 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17610
James Smart6669f9b2009-10-02 15:16:45 -040017611 icmd = &ctiocb->iocb;
James Smart6669f9b2009-10-02 15:16:45 -040017612 icmd->un.xseq64.bdl.bdeSize = 0;
James Smart5ffc2662009-11-18 15:39:44 -050017613 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
James Smart6669f9b2009-10-02 15:16:45 -040017614 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17615 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17616 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17617
17618 /* Fill in the rest of iocb fields */
17619 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17620 icmd->ulpBdeCount = 0;
17621 icmd->ulpLe = 1;
17622 icmd->ulpClass = CLASS3;
James Smart6d368e52011-05-24 11:44:12 -040017623 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
James Smart6dd9e312013-01-03 15:43:37 -050017624 ctiocb->context1 = lpfc_nlp_get(ndlp);
James Smart6669f9b2009-10-02 15:16:45 -040017625
James Smart6669f9b2009-10-02 15:16:45 -040017626 ctiocb->vport = phba->pport;
James Smart546fc852011-03-11 16:06:29 -050017627 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
James Smart6d368e52011-05-24 11:44:12 -040017628 ctiocb->sli4_lxritag = NO_XRI;
James Smart546fc852011-03-11 16:06:29 -050017629 ctiocb->sli4_xritag = NO_XRI;
17630
James Smartee0f4fe2012-05-09 21:19:14 -040017631 if (fctl & FC_FC_EX_CTX)
17632 /* Exchange responder sent the abort so we
17633 * own the oxid.
17634 */
17635 xri = oxid;
17636 else
17637 xri = rxid;
17638 lxri = lpfc_sli4_xri_inrange(phba, xri);
17639 if (lxri != NO_XRI)
17640 lpfc_set_rrq_active(phba, ndlp, lxri,
17641 (xri == oxid) ? rxid : oxid, 0);
James Smart6dd9e312013-01-03 15:43:37 -050017642 /* For BA_ABTS from exchange responder, if the logical xri with
17643 * the oxid maps to the FCP XRI range, the port no longer has
17644 * that exchange context, send a BLS_RJT. Override the IOCB for
17645 * a BA_RJT.
James Smart546fc852011-03-11 16:06:29 -050017646 */
James Smart6dd9e312013-01-03 15:43:37 -050017647 if ((fctl & FC_FC_EX_CTX) &&
James Smart895427b2017-02-12 13:52:30 -080017648 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
James Smart6dd9e312013-01-03 15:43:37 -050017649 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17650 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17651 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17652 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17653 }
17654
17655 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17656 * the driver no longer has that exchange, send a BLS_RJT. Override
17657 * the IOCB for a BA_RJT.
17658 */
17659 if (aborted == false) {
James Smart546fc852011-03-11 16:06:29 -050017660 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17661 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17662 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17663 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17664 }
James Smart6669f9b2009-10-02 15:16:45 -040017665
James Smart5ffc2662009-11-18 15:39:44 -050017666 if (fctl & FC_FC_EX_CTX) {
17667 /* ABTS sent by responder to CT exchange, construction
17668 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17669 * field and RX_ID from ABTS for RX_ID field.
17670 */
James Smart546fc852011-03-11 16:06:29 -050017671 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
James Smart5ffc2662009-11-18 15:39:44 -050017672 } else {
17673 /* ABTS sent by initiator to CT exchange, construction
17674 * of BA_ACC will need to allocate a new XRI as for the
James Smartf09c3ac2012-03-01 22:33:29 -050017675 * XRI_TAG field.
James Smart5ffc2662009-11-18 15:39:44 -050017676 */
James Smart546fc852011-03-11 16:06:29 -050017677 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
James Smart5ffc2662009-11-18 15:39:44 -050017678 }
James Smartf09c3ac2012-03-01 22:33:29 -050017679 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
James Smart546fc852011-03-11 16:06:29 -050017680 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
James Smart5ffc2662009-11-18 15:39:44 -050017681
James Smart546fc852011-03-11 16:06:29 -050017682 /* Xmit CT abts response on exchange <xid> */
James Smart6dd9e312013-01-03 15:43:37 -050017683 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17684 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17685 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
James Smart546fc852011-03-11 16:06:29 -050017686
17687 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17688 if (rc == IOCB_ERROR) {
James Smart6dd9e312013-01-03 15:43:37 -050017689 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17690 "2925 Failed to issue CT ABTS RSP x%x on "
17691 "xri x%x, Data x%x\n",
17692 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17693 phba->link_state);
17694 lpfc_nlp_put(ndlp);
17695 ctiocb->context1 = NULL;
James Smart546fc852011-03-11 16:06:29 -050017696 lpfc_sli_release_iocbq(phba, ctiocb);
17697 }
James Smart6669f9b2009-10-02 15:16:45 -040017698}
17699
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status that the
 * unsolicited sequence has been aborted. After that, it will issue a basic
 * accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup:
		 * first try the driver's partial sequences, then the ULPs.
		 */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	/* dmabuf is invalid after this point; fc_hdr copy is used below */
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	/* NVMe target mode handles the abort itself; no BLS response here */
	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
17745
17746/**
James Smart4f774512009-05-22 14:52:35 -040017747 * lpfc_seq_complete - Indicates if a sequence is complete
17748 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17749 *
17750 * This function checks the sequence, starting with the frame described by
17751 * @dmabuf, to see if all the frames associated with this sequence are present.
17752 * the frames associated with this sequence are linked to the @dmabuf using the
17753 * dbuf list. This function looks for two major things. 1) That the first frame
17754 * has a sequence count of zero. 2) There is a frame with last frame of sequence
17755 * set. 3) That there are no holes in the sequence count. The function will
17756 * return 1 when the sequence is complete, otherwise it will return 0.
17757 **/
17758static int
17759lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17760{
17761 struct fc_frame_header *hdr;
17762 struct lpfc_dmabuf *d_buf;
17763 struct hbq_dmabuf *seq_dmabuf;
17764 uint32_t fctl;
17765 int seq_count = 0;
17766
17767 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17768 /* make sure first fame of sequence has a sequence count of zero */
17769 if (hdr->fh_seq_cnt != seq_count)
17770 return 0;
17771 fctl = (hdr->fh_f_ctl[0] << 16 |
17772 hdr->fh_f_ctl[1] << 8 |
17773 hdr->fh_f_ctl[2]);
17774 /* If last frame of sequence we can return success. */
17775 if (fctl & FC_FC_END_SEQ)
17776 return 1;
17777 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17778 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17779 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17780 /* If there is a hole in the sequence count then fail. */
James Smarteeead812009-12-21 17:01:23 -050017781 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
James Smart4f774512009-05-22 14:52:35 -040017782 return 0;
17783 fctl = (hdr->fh_f_ctl[0] << 16 |
17784 hdr->fh_f_ctl[1] << 8 |
17785 hdr->fh_f_ctl[2]);
17786 /* If last frame of sequence we can return success. */
17787 if (fctl & FC_FC_END_SEQ)
17788 return 1;
17789 }
17790 return 0;
17791}
17792
17793/**
17794 * lpfc_prep_seq - Prep sequence for ULP processing
17795 * @vport: Pointer to the vport on which this sequence was received
17796 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17797 *
17798 * This function takes a sequence, described by a list of frames, and creates
17799 * a list of iocbq structures to describe the sequence. This iocbq list will be
17800 * used to issue to the generic unsolicited sequence handler. This routine
17801 * returns a pointer to the first iocbq in the list. If the function is unable
17802 * to allocate an iocbq then it throw out the received frames that were not
17803 * able to be described and return a pointer to the first iocbq. If unable to
17804 * allocate any iocbqs (including the first) this function will return NULL.
17805 **/
17806static struct lpfc_iocbq *
17807lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17808{
James Smart7851fe22011-07-22 18:36:52 -040017809 struct hbq_dmabuf *hbq_buf;
James Smart4f774512009-05-22 14:52:35 -040017810 struct lpfc_dmabuf *d_buf, *n_buf;
17811 struct lpfc_iocbq *first_iocbq, *iocbq;
17812 struct fc_frame_header *fc_hdr;
17813 uint32_t sid;
James Smart7851fe22011-07-22 18:36:52 -040017814 uint32_t len, tot_len;
James Smarteeead812009-12-21 17:01:23 -050017815 struct ulp_bde64 *pbde;
James Smart4f774512009-05-22 14:52:35 -040017816
17817 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17818 /* remove from receive buffer list */
17819 list_del_init(&seq_dmabuf->hbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040017820 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040017821 /* get the Remote Port's SID */
James Smart6669f9b2009-10-02 15:16:45 -040017822 sid = sli4_sid_from_fc_hdr(fc_hdr);
James Smart7851fe22011-07-22 18:36:52 -040017823 tot_len = 0;
James Smart4f774512009-05-22 14:52:35 -040017824 /* Get an iocbq struct to fill in. */
17825 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17826 if (first_iocbq) {
17827 /* Initialize the first IOCB. */
James Smart8fa38512009-07-19 10:01:03 -040017828 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
James Smart4f774512009-05-22 14:52:35 -040017829 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
James Smart895427b2017-02-12 13:52:30 -080017830 first_iocbq->vport = vport;
James Smart939723a2012-05-09 21:19:03 -040017831
17832 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17833 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17834 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17835 first_iocbq->iocb.un.rcvels.parmRo =
17836 sli4_did_from_fc_hdr(fc_hdr);
17837 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17838 } else
17839 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
James Smart7851fe22011-07-22 18:36:52 -040017840 first_iocbq->iocb.ulpContext = NO_XRI;
17841 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17842 be16_to_cpu(fc_hdr->fh_ox_id);
17843 /* iocbq is prepped for internal consumption. Physical vpi. */
17844 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17845 vport->phba->vpi_ids[vport->vpi];
James Smart4f774512009-05-22 14:52:35 -040017846 /* put the first buffer into the first IOCBq */
James Smart48a5a662013-07-15 18:32:28 -040017847 tot_len = bf_get(lpfc_rcqe_length,
17848 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17849
James Smart4f774512009-05-22 14:52:35 -040017850 first_iocbq->context2 = &seq_dmabuf->dbuf;
17851 first_iocbq->context3 = NULL;
17852 first_iocbq->iocb.ulpBdeCount = 1;
James Smart48a5a662013-07-15 18:32:28 -040017853 if (tot_len > LPFC_DATA_BUF_SIZE)
17854 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
James Smart4f774512009-05-22 14:52:35 -040017855 LPFC_DATA_BUF_SIZE;
James Smart48a5a662013-07-15 18:32:28 -040017856 else
17857 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17858
James Smart4f774512009-05-22 14:52:35 -040017859 first_iocbq->iocb.un.rcvels.remoteID = sid;
James Smart48a5a662013-07-15 18:32:28 -040017860
James Smart7851fe22011-07-22 18:36:52 -040017861 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
James Smart4f774512009-05-22 14:52:35 -040017862 }
17863 iocbq = first_iocbq;
17864 /*
17865 * Each IOCBq can have two Buffers assigned, so go through the list
17866 * of buffers for this sequence and save two buffers in each IOCBq
17867 */
17868 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17869 if (!iocbq) {
17870 lpfc_in_buf_free(vport->phba, d_buf);
17871 continue;
17872 }
17873 if (!iocbq->context3) {
17874 iocbq->context3 = d_buf;
17875 iocbq->iocb.ulpBdeCount++;
James Smart7851fe22011-07-22 18:36:52 -040017876 /* We need to get the size out of the right CQE */
17877 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17878 len = bf_get(lpfc_rcqe_length,
17879 &hbq_buf->cq_event.cqe.rcqe_cmpl);
James Smart48a5a662013-07-15 18:32:28 -040017880 pbde = (struct ulp_bde64 *)
17881 &iocbq->iocb.unsli3.sli3Words[4];
17882 if (len > LPFC_DATA_BUF_SIZE)
17883 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17884 else
17885 pbde->tus.f.bdeSize = len;
17886
James Smart7851fe22011-07-22 18:36:52 -040017887 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17888 tot_len += len;
James Smart4f774512009-05-22 14:52:35 -040017889 } else {
17890 iocbq = lpfc_sli_get_iocbq(vport->phba);
17891 if (!iocbq) {
17892 if (first_iocbq) {
17893 first_iocbq->iocb.ulpStatus =
17894 IOSTAT_FCP_RSP_ERROR;
17895 first_iocbq->iocb.un.ulpWord[4] =
17896 IOERR_NO_RESOURCES;
17897 }
17898 lpfc_in_buf_free(vport->phba, d_buf);
17899 continue;
17900 }
James Smart7851fe22011-07-22 18:36:52 -040017901 /* We need to get the size out of the right CQE */
17902 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17903 len = bf_get(lpfc_rcqe_length,
17904 &hbq_buf->cq_event.cqe.rcqe_cmpl);
James Smart48a5a662013-07-15 18:32:28 -040017905 iocbq->context2 = d_buf;
17906 iocbq->context3 = NULL;
17907 iocbq->iocb.ulpBdeCount = 1;
17908 if (len > LPFC_DATA_BUF_SIZE)
17909 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17910 LPFC_DATA_BUF_SIZE;
17911 else
17912 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17913
James Smart7851fe22011-07-22 18:36:52 -040017914 tot_len += len;
17915 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17916
James Smart4f774512009-05-22 14:52:35 -040017917 iocbq->iocb.un.rcvels.remoteID = sid;
17918 list_add_tail(&iocbq->list, &first_iocbq->list);
17919 }
17920 }
James Smart39c4f1a2020-01-27 16:23:01 -080017921 /* Free the sequence's header buffer */
17922 if (!first_iocbq)
17923 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
17924
James Smart4f774512009-05-22 14:52:35 -040017925 return first_iocbq;
17926}
17927
James Smart6669f9b2009-10-02 15:16:45 -040017928static void
17929lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17930 struct hbq_dmabuf *seq_dmabuf)
17931{
17932 struct fc_frame_header *fc_hdr;
17933 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17934 struct lpfc_hba *phba = vport->phba;
17935
17936 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17937 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17938 if (!iocbq) {
17939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17940 "2707 Ring %d handler: Failed to allocate "
17941 "iocb Rctl x%x Type x%x received\n",
17942 LPFC_ELS_RING,
17943 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17944 return;
17945 }
17946 if (!lpfc_complete_unsol_iocb(phba,
James Smart895427b2017-02-12 13:52:30 -080017947 phba->sli4_hba.els_wq->pring,
James Smart6669f9b2009-10-02 15:16:45 -040017948 iocbq, fc_hdr->fh_r_ctl,
17949 fc_hdr->fh_type))
James Smart6d368e52011-05-24 11:44:12 -040017950 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart6669f9b2009-10-02 15:16:45 -040017951 "2540 Ring %d handler: unexpected Rctl "
17952 "x%x Type x%x received\n",
17953 LPFC_ELS_RING,
17954 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17955
17956 /* Free iocb created in lpfc_prep_seq */
17957 list_for_each_entry_safe(curr_iocb, next_iocb,
17958 &iocbq->list, list) {
17959 list_del_init(&curr_iocb->list);
17960 lpfc_sli_release_iocbq(phba, curr_iocb);
17961 }
17962 lpfc_sli_release_iocbq(phba, iocbq);
17963}
17964
James Smartae9e28f2017-05-15 15:20:51 -070017965static void
17966lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17967 struct lpfc_iocbq *rspiocb)
17968{
17969 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17970
17971 if (pcmd && pcmd->virt)
Romain Perier771db5c2017-07-06 10:13:05 +020017972 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
James Smartae9e28f2017-05-15 15:20:51 -070017973 kfree(pcmd);
17974 lpfc_sli_release_iocbq(phba, cmdiocb);
James Smarte817e5d2018-12-13 15:17:53 -080017975 lpfc_drain_txq(phba);
James Smartae9e28f2017-05-15 15:20:51 -070017976}
17977
/*
 * lpfc_sli4_handle_mds_loopback - Echo a received MDS diagnostic frame
 * @vport: pointer to the vport this frame was received on.
 * @dmabuf: hbq buffer holding the received frame (FC header + payload).
 *
 * Copies the received payload into a newly allocated DMA buffer and
 * issues a SEND_FRAME WQE transmitting the frame back out, completing
 * the MDS loopback.  On successful issue the receive buffer is returned
 * to the pool and lpfc_sli4_mds_loopback_cmpl() releases the transmit
 * resources when the send completes.  If no iocb is available the cq
 * event is queued for the worker thread to retry later.
 */
static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union lpfc_wqe *wqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;
	unsigned long iflags;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&dmabuf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_worker_wake_up(phba);
		return;
	}

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* copyin the payload */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	/* fill in BDE's for command */
	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;

	/* Payload buffer ownership passes to the iocb; the completion
	 * handler (set below) frees it.
	 */
	iocbq->context2 = pcmd;
	iocbq->vport = vport;
	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;

	/*
	 * Setup rest of the iocb as though it were a WQE
	 * Build the SEND_FRAME WQE
	 */
	wqe = (union lpfc_wqe *)&iocbq->iocb;

	/* Replay the received FC header words into the SEND_FRAME WQE */
	wqe->send_frame.frame_len = frame_len;
	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));

	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
	iocbq->iocb.ulpLe = 1;
	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
	if (rc == IOCB_ERROR)
		goto exit;

	/* Send issued: the receive buffer is no longer needed */
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	/* Failure: unwind whatever was allocated above */
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n");
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
18065
/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the hbq buffer holding the received frame.
 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives it to upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts.
 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* MDS diagnostic traffic is echoed back rather than dispatched */
	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
		vport = phba->pport;
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* The FCF id field moved between the V0 and V1 receive CQE layouts */
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* NOTE(review): if FC_RCTL_MDS_DIAGS equals 0xF4, the early MDS
	 * check above already returned for such frames and this branch
	 * looks unreachable -- confirm against the FC_RCTL definitions.
	 */
	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
		vport = phba->pport;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2023 MDS Loopback %d bytes\n",
				bf_get(lpfc_rcqe_length,
				       &dmabuf->cq_event.cqe.rcqe_cmpl));
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
James Smart6fb120a2009-05-22 14:52:59 -040018168
18169/**
18170 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18171 * @phba: pointer to lpfc hba data structure.
18172 *
18173 * This routine is invoked to post rpi header templates to the
18174 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040018175 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18176 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040018177 *
18178 * This routine does not require any locks. It's usage is expected
18179 * to be driver load or reset recovery when the driver is
18180 * sequential.
18181 *
18182 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020018183 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040018184 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040018185 * When this error occurs, the driver is not guaranteed
18186 * to have any rpi regions posted to the device and
18187 * must either attempt to repost the regions or take a
18188 * fatal error.
18189 **/
18190int
18191lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18192{
18193 struct lpfc_rpi_hdr *rpi_page;
18194 uint32_t rc = 0;
James Smart6d368e52011-05-24 11:44:12 -040018195 uint16_t lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040018196
James Smart6d368e52011-05-24 11:44:12 -040018197 /* SLI4 ports that support extents do not require RPI headers. */
18198 if (!phba->sli4_hba.rpi_hdrs_in_use)
18199 goto exit;
18200 if (phba->sli4_hba.extents_in_use)
18201 return -EIO;
18202
James Smart6fb120a2009-05-22 14:52:59 -040018203 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
James Smart6d368e52011-05-24 11:44:12 -040018204 /*
18205 * Assign the rpi headers a physical rpi only if the driver
18206 * has not initialized those resources. A port reset only
18207 * needs the headers posted.
18208 */
18209 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18210 LPFC_RPI_RSRC_RDY)
18211 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18212
James Smart6fb120a2009-05-22 14:52:59 -040018213 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18214 if (rc != MBX_SUCCESS) {
18215 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18216 "2008 Error %d posting all rpi "
18217 "headers\n", rc);
18218 rc = -EIO;
18219 break;
18220 }
18221 }
18222
James Smart6d368e52011-05-24 11:44:12 -040018223 exit:
18224 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18225 LPFC_RPI_RSRC_RDY);
James Smart6fb120a2009-05-22 14:52:59 -040018226 return rc;
18227}
18228
18229/**
18230 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18231 * @phba: pointer to lpfc hba data structure.
18232 * @rpi_page: pointer to the rpi memory region.
18233 *
18234 * This routine is invoked to post a single rpi header to the
18235 * HBA consistent with the SLI-4 interface spec. This memory region
18236 * maps up to 64 rpi context regions.
18237 *
18238 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020018239 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040018240 * -ENOMEM - No available memory
18241 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040018242 **/
18243int
18244lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18245{
18246 LPFC_MBOXQ_t *mboxq;
18247 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18248 uint32_t rc = 0;
James Smart6fb120a2009-05-22 14:52:59 -040018249 uint32_t shdr_status, shdr_add_status;
18250 union lpfc_sli4_cfg_shdr *shdr;
18251
James Smart6d368e52011-05-24 11:44:12 -040018252 /* SLI4 ports that support extents do not require RPI headers. */
18253 if (!phba->sli4_hba.rpi_hdrs_in_use)
18254 return rc;
18255 if (phba->sli4_hba.extents_in_use)
18256 return -EIO;
18257
James Smart6fb120a2009-05-22 14:52:59 -040018258 /* The port is notified of the header region via a mailbox command. */
18259 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18260 if (!mboxq) {
18261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18262 "2001 Unable to allocate memory for issuing "
18263 "SLI_CONFIG_SPECIAL mailbox command\n");
18264 return -ENOMEM;
18265 }
18266
18267 /* Post all rpi memory regions to the port. */
18268 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
James Smart6fb120a2009-05-22 14:52:59 -040018269 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18270 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18271 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
James Smartfedd3b72011-02-16 12:39:24 -050018272 sizeof(struct lpfc_sli4_cfg_mhdr),
18273 LPFC_SLI4_MBX_EMBED);
James Smart6d368e52011-05-24 11:44:12 -040018274
18275
18276 /* Post the physical rpi to the port for this rpi header. */
James Smart6fb120a2009-05-22 14:52:59 -040018277 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18278 rpi_page->start_rpi);
James Smart6d368e52011-05-24 11:44:12 -040018279 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18280 hdr_tmpl, rpi_page->page_count);
18281
James Smart6fb120a2009-05-22 14:52:59 -040018282 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18283 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
James Smartf1126682009-06-10 17:22:44 -040018284 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
James Smart6fb120a2009-05-22 14:52:59 -040018285 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18286 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18287 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18288 if (rc != MBX_TIMEOUT)
18289 mempool_free(mboxq, phba->mbox_mem_pool);
18290 if (shdr_status || shdr_add_status || rc) {
18291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18292 "2514 POST_RPI_HDR mailbox failed with "
18293 "status x%x add_status x%x, mbx status x%x\n",
18294 shdr_status, shdr_add_status, rc);
18295 rc = -ENXIO;
James Smart845d9e82017-05-15 15:20:38 -070018296 } else {
18297 /*
18298 * The next_rpi stores the next logical module-64 rpi value used
18299 * to post physical rpis in subsequent rpi postings.
18300 */
18301 spin_lock_irq(&phba->hbalock);
18302 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18303 spin_unlock_irq(&phba->hbalock);
James Smart6fb120a2009-05-22 14:52:59 -040018304 }
18305 return rc;
18306}
18307
18308/**
18309 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18310 * @phba: pointer to lpfc hba data structure.
18311 *
18312 * This routine is invoked to post rpi header templates to the
18313 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040018314 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18315 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040018316 *
18317 * Returns
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020018318 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
James Smart6fb120a2009-05-22 14:52:59 -040018319 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18320 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		/* Claim the rpi in the bitmask and update accounting */
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO,
			LOG_NODE | LOG_DISCOVERY,
			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	/* Drop the lock before the header grow path: it issues a mailbox
	 * command (lpfc_sli4_post_rpi_hdr) which must not run under hbalock.
	 */
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			/* Translate the logical start rpi to physical */
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
18393
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse (lock already held)
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release, or LPFC_RPI_ALLOC_ERROR if already released.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. Callers hold the hbalock
 * (see lpfc_sli4_free_rpi for the locked wrapper).
 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040018401static void
James Smartd7c47992010-06-08 18:31:54 -040018402__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18403{
James Smart7cfd5632019-11-04 16:56:58 -080018404 /*
18405 * if the rpi value indicates a prior unreg has already
18406 * been done, skip the unreg.
18407 */
18408 if (rpi == LPFC_RPI_ALLOC_ERROR)
18409 return;
18410
James Smartd7c47992010-06-08 18:31:54 -040018411 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18412 phba->sli4_hba.rpi_count--;
18413 phba->sli4_hba.max_cfg_param.rpi_used--;
James Smartb95b2112019-08-14 16:56:47 -070018414 } else {
James Smart0f154222019-09-21 20:58:52 -070018415 lpfc_printf_log(phba, KERN_INFO,
18416 LOG_NODE | LOG_DISCOVERY,
James Smartb95b2112019-08-14 16:56:47 -070018417 "2016 rpi %x not inuse\n",
18418 rpi);
James Smartd7c47992010-06-08 18:31:54 -040018419 }
18420}
18421
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/* Locked wrapper: take the hbalock around the real free */
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
18436
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the memory used to track rpi
 * allocations (the rpi bitmask and rpi id array) and to clear the
 * rpi-resources-ready flag.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	/* Free the rpi tracking structures and mark resources not ready */
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
18451
/**
 * lpfc_sli4_resume_rpi - Issue a RESUME_RPI mailbox command for a node
 * @ndlp: pointer to the nodelist entry whose rpi is being resumed.
 * @cmpl: optional mailbox completion handler; a default is used when NULL.
 * @arg: context buffer handed to the completion handler.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command for
 * the rpi associated with @ndlp.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	/* Wire up the caller's completion handler, or fall back to the
	 * default mailbox completion.
	 */
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_buf = arg;
		mboxq->ctx_ndlp = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		/* Issue failed: the mailbox is still ours to free */
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
18492
18493/**
18494 * lpfc_sli4_init_vpi - Initialize a vpi with the port
James Smart76a95d72010-11-20 23:11:48 -050018495 * @vport: Pointer to the vport for which the vpi is being initialized
James Smart6fb120a2009-05-22 14:52:59 -040018496 *
James Smart76a95d72010-11-20 23:11:48 -050018497 * This routine is invoked to activate a vpi with the port.
James Smart6fb120a2009-05-22 14:52:59 -040018498 *
18499 * Returns:
18500 * 0 success
18501 * -Evalue otherwise
18502 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* Build and issue the INIT_VPI mailbox command, polling for
	 * completion up to the command-specific timeout.
	 */
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	/* NOTE(review): on MBX_TIMEOUT the mailbox is deliberately not
	 * freed here (presumably still owned by the mbox layer) -- this
	 * matches the pattern used elsewhere in the driver.
	 */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
18529
18530/**
18531 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18532 * @phba: pointer to lpfc hba data structure.
18533 * @mboxq: Pointer to mailbox object.
18534 *
18535 * This routine is invoked to manually add a single FCF record. The caller
18536 * must pass a completely initialized FCF_Record. This routine takes
18537 * care of the nonembedded mailbox operations.
18538 **/
18539static void
18540lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18541{
18542 void *virt_addr;
18543 union lpfc_sli4_cfg_shdr *shdr;
18544 uint32_t shdr_status, shdr_add_status;
18545
18546 virt_addr = mboxq->sge_array->addr[0];
18547 /* The IOCTL status is embedded in the mailbox subheader. */
18548 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18549 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18550 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18551
18552 if ((shdr_status || shdr_add_status) &&
18553 (shdr_status != STATUS_FCF_IN_USE))
18554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18555 "2558 ADD_FCF_RECORD mailbox failed with "
18556 "status x%x add_status x%x\n",
18557 shdr_status, shdr_add_status);
18558
18559 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18560}
18561
18562/**
18563 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18564 * @phba: pointer to lpfc hba data structure.
18565 * @fcf_record: pointer to the initialized fcf record to add.
18566 *
18567 * This routine is invoked to manually add a single FCF record. The caller
18568 * must pass a completely initialized FCF_Record. This routine takes
18569 * care of the nonembedded mailbox operations.
18570 **/
18571int
18572lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18573{
18574 int rc = 0;
18575 LPFC_MBOXQ_t *mboxq;
18576 uint8_t *bytep;
18577 void *virt_addr;
James Smart6fb120a2009-05-22 14:52:59 -040018578 struct lpfc_mbx_sge sge;
18579 uint32_t alloc_len, req_len;
18580 uint32_t fcfindex;
18581
18582 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18583 if (!mboxq) {
18584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18585 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18586 return -ENOMEM;
18587 }
18588
18589 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18590 sizeof(uint32_t);
18591
18592 /* Allocate DMA memory and set up the non-embedded mailbox command */
18593 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18594 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18595 req_len, LPFC_SLI4_MBX_NEMBED);
18596 if (alloc_len < req_len) {
18597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18598 "2523 Allocated DMA memory size (x%x) is "
18599 "less than the requested DMA memory "
18600 "size (x%x)\n", alloc_len, req_len);
18601 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18602 return -ENOMEM;
18603 }
18604
18605 /*
18606 * Get the first SGE entry from the non-embedded DMA memory. This
18607 * routine only uses a single SGE.
18608 */
18609 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
James Smart6fb120a2009-05-22 14:52:59 -040018610 virt_addr = mboxq->sge_array->addr[0];
18611 /*
18612 * Configure the FCF record for FCFI 0. This is the driver's
18613 * hardcoded default and gets used in nonFIP mode.
18614 */
18615 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18616 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18617 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18618
18619 /*
18620 * Copy the fcf_index and the FCF Record Data. The data starts after
18621 * the FCoE header plus word10. The data copy needs to be endian
18622 * correct.
18623 */
18624 bytep += sizeof(uint32_t);
18625 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18626 mboxq->vport = phba->pport;
18627 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18628 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18629 if (rc == MBX_NOT_FINISHED) {
18630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18631 "2515 ADD_FCF_RECORD mailbox failed with "
18632 "status 0x%x\n", rc);
18633 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18634 rc = -EIO;
18635 } else
18636 rc = 0;
18637
18638 return rc;
18639}
18640
18641/**
18642 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18643 * @phba: pointer to lpfc hba data structure.
18644 * @fcf_record: pointer to the fcf record to write the default data.
18645 * @fcf_index: FCF table entry index.
18646 *
18647 * This routine is invoked to build the driver's default FCF record. The
18648 * values used are hardcoded. This routine handles memory initialization.
18649 *
18650 **/
18651void
18652lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18653 struct fcf_record *fcf_record,
18654 uint16_t fcf_index)
18655{
18656 memset(fcf_record, 0, sizeof(struct fcf_record));
18657 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18658 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18659 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18660 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18661 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18662 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18663 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18664 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18665 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18666 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18667 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18668 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18669 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
James Smart0c287582009-06-10 17:22:56 -040018670 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
James Smart6fb120a2009-05-22 14:52:59 -040018671 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18672 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18673 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18674 /* Set the VLAN bit map */
18675 if (phba->valid_vlan) {
18676 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18677 = 1 << (phba->vlan_id % 8);
18678 }
18679}
18680
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * It snapshots the current FCoE event tags, sets FCF_TS_INPROG while the
 * asynchronous READ_FCF mailbox command is outstanding, and clears the
 * flag again (and frees the mailbox) on any submission failure.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	/* Snapshot the event tags current at scan start so events that
	 * arrive during the scan can be recognized as new.
	 */
	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	/* Mark FCF table scan in progress before submission. */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		/* mboxq is NULL only when allocation itself failed. */
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}
James Smarta0c87cb2009-07-19 10:01:10 -040018743
18744/**
James Smarta93ff372010-10-22 11:06:08 -040018745 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
James Smart0c9ab6f2010-02-26 14:15:57 -050018746 * @phba: pointer to lpfc hba data structure.
18747 * @fcf_index: FCF table entry offset.
18748 *
18749 * This routine is invoked to read an FCF record indicated by @fcf_index
James Smarta93ff372010-10-22 11:06:08 -040018750 * and to use it for FLOGI roundrobin FCF failover.
James Smart0c9ab6f2010-02-26 14:15:57 -050018751 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030018752 * Return 0 if the mailbox command is submitted successfully, none 0
James Smart0c9ab6f2010-02-26 14:15:57 -050018753 * otherwise.
18754 **/
18755int
18756lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18757{
18758 int rc = 0, error;
18759 LPFC_MBOXQ_t *mboxq;
18760
18761 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18762 if (!mboxq) {
18763 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18764 "2763 Failed to allocate mbox for "
18765 "READ_FCF cmd\n");
18766 error = -ENOMEM;
18767 goto fail_fcf_read;
18768 }
18769 /* Construct the read FCF record mailbox command */
18770 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18771 if (rc) {
18772 error = -EINVAL;
18773 goto fail_fcf_read;
18774 }
18775 /* Issue the mailbox command asynchronously */
18776 mboxq->vport = phba->pport;
18777 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18778 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18779 if (rc == MBX_NOT_FINISHED)
18780 error = -EIO;
18781 else
18782 error = 0;
18783
18784fail_fcf_read:
18785 if (error && mboxq)
18786 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18787 return error;
18788}
18789
18790/**
18791 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18792 * @phba: pointer to lpfc hba data structure.
18793 * @fcf_index: FCF table entry offset.
18794 *
18795 * This routine is invoked to read an FCF record indicated by @fcf_index to
James Smarta93ff372010-10-22 11:06:08 -040018796 * determine whether it's eligible for FLOGI roundrobin failover list.
James Smart0c9ab6f2010-02-26 14:15:57 -050018797 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030018798 * Return 0 if the mailbox command is submitted successfully, none 0
James Smart0c9ab6f2010-02-26 14:15:57 -050018799 * otherwise.
18800 **/
18801int
18802lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18803{
18804 int rc = 0, error;
18805 LPFC_MBOXQ_t *mboxq;
18806
18807 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18808 if (!mboxq) {
18809 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18810 "2758 Failed to allocate mbox for "
18811 "READ_FCF cmd\n");
18812 error = -ENOMEM;
18813 goto fail_fcf_read;
18814 }
18815 /* Construct the read FCF record mailbox command */
18816 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18817 if (rc) {
18818 error = -EINVAL;
18819 goto fail_fcf_read;
18820 }
18821 /* Issue the mailbox command asynchronously */
18822 mboxq->vport = phba->pport;
18823 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18824 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18825 if (rc == MBX_NOT_FINISHED)
18826 error = -EIO;
18827 else
18828 error = 0;
18829
18830fail_fcf_read:
18831 if (error && mboxq)
18832 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18833 return error;
18834}
18835
/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level. Starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list for
 * next lowest priority group and repopulates the rr_bmask with only those
 * fcf_indexes.
 * returns:
 * 1=success 0=failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	/* Logged for diagnostics only; the value is not used below. */
	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	/*
	 * NOTE(review): hbalock is dropped and re-taken inside this
	 * iteration around lpfc_sli4_fcf_rr_index_set(); this assumes
	 * fcf_pri_list is not modified concurrently here — confirm.
	 */
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			/* Out-of-range index: give up and report failure. */
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}


	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");

		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	/*
	 * Skip candidates whose FLOGI already failed; if that is the only
	 * entry left on the priority list, report none available instead
	 * of looping forever.
	 */
	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
		LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
18999
19000/**
19001 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19002 * @phba: pointer to lpfc hba data structure.
19003 *
19004 * This routine sets the FCF record index in to the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040019005 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050019006 * does not go beyond the range of the driver allocated bmask dimension
19007 * before setting the bit.
19008 *
19009 * Returns 0 if the index bit successfully set, otherwise, it returns
19010 * -EINVAL.
19011 **/
19012int
19013lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19014{
19015 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19016 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019017 "2610 FCF (x%x) reached driver's book "
19018 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050019019 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19020 return -EINVAL;
19021 }
19022 /* Set the eligible FCF record index bmask */
19023 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19024
James Smart3804dc82010-07-14 15:31:37 -040019025 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019026 "2790 Set FCF (x%x) to roundrobin FCF failover "
James Smart3804dc82010-07-14 15:31:37 -040019027 "bmask\n", fcf_index);
19028
James Smart0c9ab6f2010-02-26 14:15:57 -050019029 return 0;
19030}
19031
19032/**
James Smart3804dc82010-07-14 15:31:37 -040019033 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
James Smart0c9ab6f2010-02-26 14:15:57 -050019034 * @phba: pointer to lpfc hba data structure.
19035 *
19036 * This routine clears the FCF record index from the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040019037 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050019038 * does not go beyond the range of the driver allocated bmask dimension
19039 * before clearing the bit.
19040 **/
19041void
19042lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19043{
James Smart9a803a72013-09-06 12:17:56 -040019044 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
James Smart0c9ab6f2010-02-26 14:15:57 -050019045 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19046 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019047 "2762 FCF (x%x) reached driver's book "
19048 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050019049 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19050 return;
19051 }
19052 /* Clear the eligible FCF record index bmask */
James Smart7d791df2011-07-22 18:37:52 -040019053 spin_lock_irq(&phba->hbalock);
James Smart9a803a72013-09-06 12:17:56 -040019054 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19055 list) {
James Smart7d791df2011-07-22 18:37:52 -040019056 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19057 list_del_init(&fcf_pri->list);
19058 break;
19059 }
19060 }
19061 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050019062 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
James Smart3804dc82010-07-14 15:31:37 -040019063
19064 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019065 "2791 Clear FCF (x%x) from roundrobin failover "
James Smart3804dc82010-07-14 15:31:37 -040019066 "bmask\n", fcf_index);
James Smart0c9ab6f2010-02-26 14:15:57 -050019067}
19068
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the completed REDISCOVER_FCF mailbox command.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it falls back according
 * to which event requested the rediscovery (CVL vs. FCF DEAD); on success it
 * starts the FCF rediscovery quiescent wait timer. The mailbox is freed in
 * all cases.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	/* Command status lives in the embedded config subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}
19126
19127/**
James Smart3804dc82010-07-14 15:31:37 -040019128 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
James Smartecfd03c2010-02-12 14:41:27 -050019129 * @phba: pointer to lpfc hba data structure.
19130 *
19131 * This routine is invoked to request for rediscovery of the entire FCF table
19132 * by the port.
19133 **/
19134int
19135lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19136{
19137 LPFC_MBOXQ_t *mbox;
19138 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19139 int rc, length;
19140
James Smart0c9ab6f2010-02-26 14:15:57 -050019141 /* Cancel retry delay timers to all vports before FCF rediscover */
19142 lpfc_cancel_all_vport_retry_delay_timer(phba);
19143
James Smartecfd03c2010-02-12 14:41:27 -050019144 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19145 if (!mbox) {
19146 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19147 "2745 Failed to allocate mbox for "
19148 "requesting FCF rediscover.\n");
19149 return -ENOMEM;
19150 }
19151
19152 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19153 sizeof(struct lpfc_sli4_cfg_mhdr));
19154 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19155 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19156 length, LPFC_SLI4_MBX_EMBED);
19157
19158 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19159 /* Set count to 0 for invalidating the entire FCF database */
19160 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19161
19162 /* Issue the mailbox command asynchronously */
19163 mbox->vport = phba->pport;
19164 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19165 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19166
19167 if (rc == MBX_NOT_FINISHED) {
19168 mempool_free(mbox, phba->mbox_mem_pool);
19169 return -EIO;
19170 }
19171 return 0;
19172}
19173
19174/**
James Smartfc2b9892010-02-26 14:15:29 -050019175 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19176 * @phba: pointer to lpfc hba data structure.
19177 *
19178 * This function is the failover routine as a last resort to the FCF DEAD
19179 * event when driver failed to perform fast FCF failover.
19180 **/
19181void
19182lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19183{
19184 uint32_t link_state;
19185
19186 /*
19187 * Last resort as FCF DEAD event failover will treat this as
19188 * a link down, but save the link state because we don't want
19189 * it to be changed to Link Down unless it is already down.
19190 */
19191 link_state = phba->link_state;
19192 lpfc_linkdown(phba);
19193 phba->link_state = link_state;
19194
19195 /* Unregister FCF if no devices connected to it */
19196 lpfc_unregister_unused_fcf(phba);
19197}
19198
/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. The region is read in successive DUMP_MEMORY polls, each
 * chunk appended to @rgn23_data until the dump reports no more data or
 * DMP_RGN23_SIZE is reached. When it successfully retrieves data, the size
 * of the data will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		/* Request the next chunk of region 23 at this offset. */
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			/* Force loop exit below on mailbox error. */
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		/* Clamp to the remaining room in the caller's buffer.
		 * NOTE(review): despite its name, word_cnt appears to be
		 * used as a byte count here (compared against the byte
		 * sized DMP_RGN23_SIZE) — confirm against lpfc_dump_mem.
		 */
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				       rgn23_data + offset,
				       mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
19256
19257/**
19258 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19259 * @phba: pointer to lpfc hba data structure.
19260 * @rgn23_data: pointer to configure region 23 data.
19261 *
19262 * This function gets SLI4 port configure region 23 data through memory dump
19263 * mailbox command. When it successfully retrieves data, the size of the data
19264 * will be returned, otherwise, 0 will be returned.
19265 **/
19266static uint32_t
19267lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19268{
19269 LPFC_MBOXQ_t *mboxq = NULL;
19270 struct lpfc_dmabuf *mp = NULL;
19271 struct lpfc_mqe *mqe;
19272 uint32_t data_length = 0;
19273 int rc;
19274
19275 if (!rgn23_data)
19276 return 0;
19277
19278 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19279 if (!mboxq) {
19280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19281 "3105 failed to allocate mailbox memory\n");
19282 return 0;
19283 }
19284
19285 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19286 goto out;
19287 mqe = &mboxq->u.mqe;
James Smart3e1f0712018-11-29 16:09:29 -080019288 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
James Smart026abb82011-12-13 13:20:45 -050019289 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19290 if (rc)
19291 goto out;
19292 data_length = mqe->un.mb_words[5];
19293 if (data_length == 0)
19294 goto out;
19295 if (data_length > DMP_RGN23_SIZE) {
19296 data_length = 0;
19297 goto out;
19298 }
19299 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19300out:
19301 mempool_free(mboxq, phba->mbox_mem_pool);
19302 if (mp) {
19303 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19304 kfree(mp);
19305 }
19306 return data_length;
19307}
19308
19309/**
19310 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19311 * @phba: pointer to lpfc hba data structure.
19312 *
19313 * This function read region 23 and parse TLV for port status to
19314 * decide if the user disaled the port. If the TLV indicates the
19315 * port is disabled, the hba_flag is set accordingly.
19316 **/
19317void
19318lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19319{
19320 uint8_t *rgn23_data = NULL;
19321 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19322 uint32_t offset = 0;
19323
19324 /* Get adapter Region 23 data */
19325 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19326 if (!rgn23_data)
19327 goto out;
19328
19329 if (phba->sli_rev < LPFC_SLI_REV4)
19330 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19331 else {
19332 if_type = bf_get(lpfc_sli_intf_if_type,
19333 &phba->sli4_hba.sli_intf);
19334 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19335 goto out;
19336 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19337 }
James Smarta0c87cb2009-07-19 10:01:10 -040019338
19339 if (!data_size)
19340 goto out;
19341
19342 /* Check the region signature first */
19343 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19345 "2619 Config region 23 has bad signature\n");
19346 goto out;
19347 }
19348 offset += 4;
19349
19350 /* Check the data structure version */
19351 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19353 "2620 Config region 23 has bad version\n");
19354 goto out;
19355 }
19356 offset += 4;
19357
19358 /* Parse TLV entries in the region */
19359 while (offset < data_size) {
19360 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19361 break;
19362 /*
19363 * If the TLV is not driver specific TLV or driver id is
19364 * not linux driver id, skip the record.
19365 */
19366 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19367 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19368 (rgn23_data[offset + 3] != 0)) {
19369 offset += rgn23_data[offset + 1] * 4 + 4;
19370 continue;
19371 }
19372
19373 /* Driver found a driver specific TLV in the config region */
19374 sub_tlv_len = rgn23_data[offset + 1] * 4;
19375 offset += 4;
19376 tlv_offset = 0;
19377
19378 /*
19379 * Search for configured port state sub-TLV.
19380 */
19381 while ((offset < data_size) &&
19382 (tlv_offset < sub_tlv_len)) {
19383 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19384 offset += 4;
19385 tlv_offset += 4;
19386 break;
19387 }
19388 if (rgn23_data[offset] != PORT_STE_TYPE) {
19389 offset += rgn23_data[offset + 1] * 4 + 4;
19390 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19391 continue;
19392 }
19393
19394 /* This HBA contains PORT_STE configured */
19395 if (!rgn23_data[offset + 2])
19396 phba->hba_flag |= LINK_DISABLED;
19397
19398 goto out;
19399 }
19400 }
James Smart026abb82011-12-13 13:20:45 -050019401
James Smarta0c87cb2009-07-19 10:01:10 -040019402out:
James Smarta0c87cb2009-07-19 10:01:10 -040019403 kfree(rgn23_data);
19404 return;
19405}
James Smart695a8142010-01-26 23:08:03 -050019406
19407/**
James Smart52d52442011-05-24 11:42:45 -040019408 * lpfc_wr_object - write an object to the firmware
19409 * @phba: HBA structure that indicates port to create a queue on.
19410 * @dmabuf_list: list of dmabufs to write to the port.
19411 * @size: the total byte value of the objects to write to the port.
19412 * @offset: the current offset to be used to start the transfer.
19413 *
19414 * This routine will create a wr_object mailbox command to send to the port.
19415 * the mailbox command will be constructed using the dma buffers described in
19416 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19417 * BDEs that the imbedded mailbox can support. The @offset variable will be
19418 * used to indicate the starting offset of the transfer and will also return
19419 * the offset after the write object mailbox has completed. @size is used to
19420 * determine the end of the object and whether the eof bit should be set.
19421 *
19422 * Return 0 is successful and offset will contain the the new offset to use
19423 * for the next write.
19424 * Return negative value for error cases.
19425 **/
19426int
19427lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19428 uint32_t size, uint32_t *offset)
19429{
19430 struct lpfc_mbx_wr_object *wr_object;
19431 LPFC_MBOXQ_t *mbox;
19432 int rc = 0, i = 0;
James Smartf3d0a8a2019-12-18 15:58:01 -080019433 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
James Smart52d52442011-05-24 11:42:45 -040019434 uint32_t mbox_tmo;
James Smart52d52442011-05-24 11:42:45 -040019435 struct lpfc_dmabuf *dmabuf;
19436 uint32_t written = 0;
James Smart50212672018-12-13 15:17:57 -080019437 bool check_change_status = false;
James Smart52d52442011-05-24 11:42:45 -040019438
19439 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19440 if (!mbox)
19441 return -ENOMEM;
19442
19443 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19444 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19445 sizeof(struct lpfc_mbx_wr_object) -
19446 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19447
19448 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19449 wr_object->u.request.write_offset = *offset;
19450 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19451 wr_object->u.request.object_name[0] =
19452 cpu_to_le32(wr_object->u.request.object_name[0]);
19453 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19454 list_for_each_entry(dmabuf, dmabuf_list, list) {
19455 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19456 break;
19457 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19458 wr_object->u.request.bde[i].addrHigh =
19459 putPaddrHigh(dmabuf->phys);
19460 if (written + SLI4_PAGE_SIZE >= size) {
19461 wr_object->u.request.bde[i].tus.f.bdeSize =
19462 (size - written);
19463 written += (size - written);
19464 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
James Smart50212672018-12-13 15:17:57 -080019465 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19466 check_change_status = true;
James Smart52d52442011-05-24 11:42:45 -040019467 } else {
19468 wr_object->u.request.bde[i].tus.f.bdeSize =
19469 SLI4_PAGE_SIZE;
19470 written += SLI4_PAGE_SIZE;
19471 }
19472 i++;
19473 }
19474 wr_object->u.request.bde_count = i;
19475 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19476 if (!phba->sli4_hba.intr_enable)
19477 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19478 else {
James Smarta183a152011-10-10 21:32:43 -040019479 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart52d52442011-05-24 11:42:45 -040019480 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19481 }
19482 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart50212672018-12-13 15:17:57 -080019483 shdr_status = bf_get(lpfc_mbox_hdr_status,
19484 &wr_object->header.cfg_shdr.response);
19485 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19486 &wr_object->header.cfg_shdr.response);
19487 if (check_change_status) {
19488 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19489 &wr_object->u.response);
James Smartf3d0a8a2019-12-18 15:58:01 -080019490
19491 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19492 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19493 shdr_csf = bf_get(lpfc_wr_object_csf,
19494 &wr_object->u.response);
19495 if (shdr_csf)
19496 shdr_change_status =
19497 LPFC_CHANGE_STATUS_PCI_RESET;
19498 }
19499
James Smart50212672018-12-13 15:17:57 -080019500 switch (shdr_change_status) {
19501 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19502 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19503 "3198 Firmware write complete: System "
19504 "reboot required to instantiate\n");
19505 break;
19506 case (LPFC_CHANGE_STATUS_FW_RESET):
19507 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19508 "3199 Firmware write complete: Firmware"
19509 " reset required to instantiate\n");
19510 break;
19511 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19512 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19513 "3200 Firmware write complete: Port "
19514 "Migration or PCI Reset required to "
19515 "instantiate\n");
19516 break;
19517 case (LPFC_CHANGE_STATUS_PCI_RESET):
19518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19519 "3201 Firmware write complete: PCI "
19520 "Reset required to instantiate\n");
19521 break;
19522 default:
19523 break;
19524 }
19525 }
James Smart52d52442011-05-24 11:42:45 -040019526 if (rc != MBX_TIMEOUT)
19527 mempool_free(mbox, phba->mbox_mem_pool);
19528 if (shdr_status || shdr_add_status || rc) {
19529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19530 "3025 Write Object mailbox failed with "
19531 "status x%x add_status x%x, mbx status x%x\n",
19532 shdr_status, shdr_add_status, rc);
19533 rc = -ENXIO;
James Smart1feb8202018-02-22 08:18:47 -080019534 *offset = shdr_add_status;
James Smart52d52442011-05-24 11:42:45 -040019535 } else
19536 *offset += wr_object->u.response.actual_write_length;
19537 return rc;
19538}
19539
19540/**
James Smart695a8142010-01-26 23:08:03 -050019541 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19542 * @vport: pointer to vport data structure.
19543 *
19544 * This function iterate through the mailboxq and clean up all REG_LOGIN
19545 * and REG_VPI mailbox commands associated with the vport. This function
19546 * is called when driver want to restart discovery of the vport due to
19547 * a Clear Virtual Link event.
19548 **/
19549void
19550lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19551{
19552 struct lpfc_hba *phba = vport->phba;
19553 LPFC_MBOXQ_t *mb, *nextmb;
19554 struct lpfc_dmabuf *mp;
James Smart78730cf2010-04-06 15:06:30 -040019555 struct lpfc_nodelist *ndlp;
James Smartd439d282010-09-29 11:18:45 -040019556 struct lpfc_nodelist *act_mbx_ndlp = NULL;
James Smart589a52d2010-07-14 15:30:54 -040019557 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
James Smartd439d282010-09-29 11:18:45 -040019558 LIST_HEAD(mbox_cmd_list);
James Smart63e801c2010-11-20 23:14:19 -050019559 uint8_t restart_loop;
James Smart695a8142010-01-26 23:08:03 -050019560
James Smartd439d282010-09-29 11:18:45 -040019561 /* Clean up internally queued mailbox commands with the vport */
James Smart695a8142010-01-26 23:08:03 -050019562 spin_lock_irq(&phba->hbalock);
19563 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19564 if (mb->vport != vport)
19565 continue;
19566
19567 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19568 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19569 continue;
19570
James Smartd439d282010-09-29 11:18:45 -040019571 list_del(&mb->list);
19572 list_add_tail(&mb->list, &mbox_cmd_list);
19573 }
19574 /* Clean up active mailbox command with the vport */
19575 mb = phba->sli.mbox_active;
19576 if (mb && (mb->vport == vport)) {
19577 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19578 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19579 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19580 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
James Smart3e1f0712018-11-29 16:09:29 -080019581 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
James Smartd439d282010-09-29 11:18:45 -040019582 /* Put reference count for delayed processing */
19583 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19584 /* Unregister the RPI when mailbox complete */
19585 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19586 }
19587 }
James Smart63e801c2010-11-20 23:14:19 -050019588 /* Cleanup any mailbox completions which are not yet processed */
19589 do {
19590 restart_loop = 0;
19591 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19592 /*
19593 * If this mailox is already processed or it is
19594 * for another vport ignore it.
19595 */
19596 if ((mb->vport != vport) ||
19597 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19598 continue;
19599
19600 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19601 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19602 continue;
19603
19604 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19605 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
James Smart3e1f0712018-11-29 16:09:29 -080019606 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
James Smart63e801c2010-11-20 23:14:19 -050019607 /* Unregister the RPI when mailbox complete */
19608 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19609 restart_loop = 1;
19610 spin_unlock_irq(&phba->hbalock);
19611 spin_lock(shost->host_lock);
19612 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19613 spin_unlock(shost->host_lock);
19614 spin_lock_irq(&phba->hbalock);
19615 break;
19616 }
19617 }
19618 } while (restart_loop);
19619
James Smartd439d282010-09-29 11:18:45 -040019620 spin_unlock_irq(&phba->hbalock);
19621
19622 /* Release the cleaned-up mailbox commands */
19623 while (!list_empty(&mbox_cmd_list)) {
19624 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
James Smart695a8142010-01-26 23:08:03 -050019625 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
James Smart3e1f0712018-11-29 16:09:29 -080019626 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
James Smart695a8142010-01-26 23:08:03 -050019627 if (mp) {
19628 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19629 kfree(mp);
19630 }
James Smart3e1f0712018-11-29 16:09:29 -080019631 mb->ctx_buf = NULL;
19632 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19633 mb->ctx_ndlp = NULL;
James Smart78730cf2010-04-06 15:06:30 -040019634 if (ndlp) {
Dan Carpenterec21b3b2010-08-08 00:15:17 +020019635 spin_lock(shost->host_lock);
James Smart589a52d2010-07-14 15:30:54 -040019636 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
Dan Carpenterec21b3b2010-08-08 00:15:17 +020019637 spin_unlock(shost->host_lock);
James Smart78730cf2010-04-06 15:06:30 -040019638 lpfc_nlp_put(ndlp);
James Smart78730cf2010-04-06 15:06:30 -040019639 }
James Smart695a8142010-01-26 23:08:03 -050019640 }
James Smart695a8142010-01-26 23:08:03 -050019641 mempool_free(mb, phba->mbox_mem_pool);
19642 }
James Smartd439d282010-09-29 11:18:45 -040019643
19644 /* Release the ndlp with the cleaned-up active mailbox command */
19645 if (act_mbx_ndlp) {
19646 spin_lock(shost->host_lock);
19647 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19648 spin_unlock(shost->host_lock);
19649 lpfc_nlp_put(act_mbx_ndlp);
James Smart695a8142010-01-26 23:08:03 -050019650 }
James Smart695a8142010-01-26 23:08:03 -050019651}
19652
James Smart2a9bf3d2010-06-07 15:24:45 -040019653/**
19654 * lpfc_drain_txq - Drain the txq
19655 * @phba: Pointer to HBA context object.
19656 *
19657 * This function attempt to submit IOCBs on the txq
19658 * to the adapter. For SLI4 adapters, the txq contains
19659 * ELS IOCBs that have been deferred because the there
19660 * are no SGLs. This congestion can occur with large
19661 * vport counts during node discovery.
19662 **/
19663
19664uint32_t
19665lpfc_drain_txq(struct lpfc_hba *phba)
19666{
19667 LIST_HEAD(completions);
James Smart895427b2017-02-12 13:52:30 -080019668 struct lpfc_sli_ring *pring;
Daeseok Youn2e706372014-02-21 09:03:32 +090019669 struct lpfc_iocbq *piocbq = NULL;
James Smart2a9bf3d2010-06-07 15:24:45 -040019670 unsigned long iflags = 0;
19671 char *fail_msg = NULL;
19672 struct lpfc_sglq *sglq;
James Smart205e8242018-03-05 12:04:03 -080019673 union lpfc_wqe128 wqe;
James Smarta2fc4aef2014-09-03 12:57:55 -040019674 uint32_t txq_cnt = 0;
James Smartdc19e3b2018-05-24 21:08:57 -070019675 struct lpfc_queue *wq;
James Smart2a9bf3d2010-06-07 15:24:45 -040019676
James Smartdc19e3b2018-05-24 21:08:57 -070019677 if (phba->link_flag & LS_MDS_LOOPBACK) {
19678 /* MDS WQE are posted only to first WQ*/
James Smartc00f62e2019-08-14 16:57:11 -070019679 wq = phba->sli4_hba.hdwq[0].io_wq;
James Smartdc19e3b2018-05-24 21:08:57 -070019680 if (unlikely(!wq))
19681 return 0;
19682 pring = wq->pring;
19683 } else {
19684 wq = phba->sli4_hba.els_wq;
19685 if (unlikely(!wq))
19686 return 0;
19687 pring = lpfc_phba_elsring(phba);
19688 }
19689
19690 if (unlikely(!pring) || list_empty(&pring->txq))
Dick Kennedy1234a6d2017-09-29 17:34:29 -070019691 return 0;
James Smart895427b2017-02-12 13:52:30 -080019692
James Smart398d81c2013-05-31 17:04:19 -040019693 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart0e9bb8d2013-03-01 16:35:12 -050019694 list_for_each_entry(piocbq, &pring->txq, list) {
19695 txq_cnt++;
19696 }
19697
19698 if (txq_cnt > pring->txq_max)
19699 pring->txq_max = txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040019700
James Smart398d81c2013-05-31 17:04:19 -040019701 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040019702
James Smart0e9bb8d2013-03-01 16:35:12 -050019703 while (!list_empty(&pring->txq)) {
James Smart398d81c2013-05-31 17:04:19 -040019704 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040019705
James Smart19ca7602010-11-20 23:11:55 -050019706 piocbq = lpfc_sli_ringtx_get(phba, pring);
James Smarta6298522012-06-12 13:54:11 -040019707 if (!piocbq) {
James Smart398d81c2013-05-31 17:04:19 -040019708 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smarta6298522012-06-12 13:54:11 -040019709 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19710 "2823 txq empty and txq_cnt is %d\n ",
James Smart0e9bb8d2013-03-01 16:35:12 -050019711 txq_cnt);
James Smarta6298522012-06-12 13:54:11 -040019712 break;
19713 }
James Smart895427b2017-02-12 13:52:30 -080019714 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
James Smart2a9bf3d2010-06-07 15:24:45 -040019715 if (!sglq) {
James Smart19ca7602010-11-20 23:11:55 -050019716 __lpfc_sli_ringtx_put(phba, pring, piocbq);
James Smart398d81c2013-05-31 17:04:19 -040019717 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040019718 break;
James Smart2a9bf3d2010-06-07 15:24:45 -040019719 }
James Smart0e9bb8d2013-03-01 16:35:12 -050019720 txq_cnt--;
James Smart2a9bf3d2010-06-07 15:24:45 -040019721
19722 /* The xri and iocb resources secured,
19723 * attempt to issue request
19724 */
James Smart6d368e52011-05-24 11:44:12 -040019725 piocbq->sli4_lxritag = sglq->sli4_lxritag;
James Smart2a9bf3d2010-06-07 15:24:45 -040019726 piocbq->sli4_xritag = sglq->sli4_xritag;
19727 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19728 fail_msg = "to convert bpl to sgl";
James Smart205e8242018-03-05 12:04:03 -080019729 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040019730 fail_msg = "to convert iocb to wqe";
James Smartdc19e3b2018-05-24 21:08:57 -070019731 else if (lpfc_sli4_wq_put(wq, &wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040019732 fail_msg = " - Wq is full";
19733 else
19734 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19735
19736 if (fail_msg) {
19737 /* Failed means we can't issue and need to cancel */
19738 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19739 "2822 IOCB failed %s iotag 0x%x "
19740 "xri 0x%x\n",
19741 fail_msg,
19742 piocbq->iotag, piocbq->sli4_xritag);
19743 list_add_tail(&piocbq->list, &completions);
19744 }
James Smart398d81c2013-05-31 17:04:19 -040019745 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040019746 }
19747
James Smart2a9bf3d2010-06-07 15:24:45 -040019748 /* Cancel all the IOCBs that cannot be issued */
19749 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19750 IOERR_SLI_ABORTED);
19751
James Smart0e9bb8d2013-03-01 16:35:12 -050019752 return txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040019753}
James Smart895427b2017-02-12 13:52:30 -080019754
19755/**
19756 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19757 * @phba: Pointer to HBA context object.
19758 * @pwqe: Pointer to command WQE.
19759 * @sglq: Pointer to the scatter gather queue object.
19760 *
19761 * This routine converts the bpl or bde that is in the WQE
19762 * to a sgl list for the sli4 hardware. The physical address
19763 * of the bpl/bde is converted back to a virtual address.
19764 * If the WQE contains a BPL then the list of BDE's is
19765 * converted to sli4_sge's. If the WQE contains a single
19766 * BDE then it is converted to a single sli_sge.
19767 * The WQE is still in cpu endianness so the contents of
19768 * the bpl can be used without byte swapping.
19769 *
19770 * Returns valid XRI = Success, NO_XRI = Failure.
19771 */
19772static uint16_t
19773lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19774 struct lpfc_sglq *sglq)
19775{
19776 uint16_t xritag = NO_XRI;
19777 struct ulp_bde64 *bpl = NULL;
19778 struct ulp_bde64 bde;
19779 struct sli4_sge *sgl = NULL;
19780 struct lpfc_dmabuf *dmabuf;
James Smart205e8242018-03-05 12:04:03 -080019781 union lpfc_wqe128 *wqe;
James Smart895427b2017-02-12 13:52:30 -080019782 int numBdes = 0;
19783 int i = 0;
19784 uint32_t offset = 0; /* accumulated offset in the sg request list */
19785 int inbound = 0; /* number of sg reply entries inbound from firmware */
19786 uint32_t cmd;
19787
19788 if (!pwqeq || !sglq)
19789 return xritag;
19790
19791 sgl = (struct sli4_sge *)sglq->sgl;
19792 wqe = &pwqeq->wqe;
19793 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19794
19795 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19796 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19797 return sglq->sli4_xritag;
19798 numBdes = pwqeq->rsvd2;
19799 if (numBdes) {
19800 /* The addrHigh and addrLow fields within the WQE
19801 * have not been byteswapped yet so there is no
19802 * need to swap them back.
19803 */
19804 if (pwqeq->context3)
19805 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19806 else
19807 return xritag;
19808
19809 bpl = (struct ulp_bde64 *)dmabuf->virt;
19810 if (!bpl)
19811 return xritag;
19812
19813 for (i = 0; i < numBdes; i++) {
19814 /* Should already be byte swapped. */
19815 sgl->addr_hi = bpl->addrHigh;
19816 sgl->addr_lo = bpl->addrLow;
19817
19818 sgl->word2 = le32_to_cpu(sgl->word2);
19819 if ((i+1) == numBdes)
19820 bf_set(lpfc_sli4_sge_last, sgl, 1);
19821 else
19822 bf_set(lpfc_sli4_sge_last, sgl, 0);
19823 /* swap the size field back to the cpu so we
19824 * can assign it to the sgl.
19825 */
19826 bde.tus.w = le32_to_cpu(bpl->tus.w);
19827 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19828 /* The offsets in the sgl need to be accumulated
19829 * separately for the request and reply lists.
19830 * The request is always first, the reply follows.
19831 */
19832 switch (cmd) {
19833 case CMD_GEN_REQUEST64_WQE:
19834 /* add up the reply sg entries */
19835 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19836 inbound++;
19837 /* first inbound? reset the offset */
19838 if (inbound == 1)
19839 offset = 0;
19840 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19841 bf_set(lpfc_sli4_sge_type, sgl,
19842 LPFC_SGE_TYPE_DATA);
19843 offset += bde.tus.f.bdeSize;
19844 break;
19845 case CMD_FCP_TRSP64_WQE:
19846 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19847 bf_set(lpfc_sli4_sge_type, sgl,
19848 LPFC_SGE_TYPE_DATA);
19849 break;
19850 case CMD_FCP_TSEND64_WQE:
19851 case CMD_FCP_TRECEIVE64_WQE:
19852 bf_set(lpfc_sli4_sge_type, sgl,
19853 bpl->tus.f.bdeFlags);
19854 if (i < 3)
19855 offset = 0;
19856 else
19857 offset += bde.tus.f.bdeSize;
19858 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19859 break;
19860 }
19861 sgl->word2 = cpu_to_le32(sgl->word2);
19862 bpl++;
19863 sgl++;
19864 }
19865 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19866 /* The addrHigh and addrLow fields of the BDE have not
19867 * been byteswapped yet so they need to be swapped
19868 * before putting them in the sgl.
19869 */
19870 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19871 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19872 sgl->word2 = le32_to_cpu(sgl->word2);
19873 bf_set(lpfc_sli4_sge_last, sgl, 1);
19874 sgl->word2 = cpu_to_le32(sgl->word2);
19875 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19876 }
19877 return sglq->sli4_xritag;
19878}
19879
19880/**
19881 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19882 * @phba: Pointer to HBA context object.
19883 * @ring_number: Base sli ring number
19884 * @pwqe: Pointer to command WQE.
19885 **/
19886int
James Smart1fbf9742019-01-28 11:14:26 -080019887lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
James Smart895427b2017-02-12 13:52:30 -080019888 struct lpfc_iocbq *pwqe)
19889{
James Smart205e8242018-03-05 12:04:03 -080019890 union lpfc_wqe128 *wqe = &pwqe->wqe;
James Smartf358dd02017-02-12 13:52:34 -080019891 struct lpfc_nvmet_rcv_ctx *ctxp;
James Smart895427b2017-02-12 13:52:30 -080019892 struct lpfc_queue *wq;
19893 struct lpfc_sglq *sglq;
19894 struct lpfc_sli_ring *pring;
19895 unsigned long iflags;
Dick Kennedycd22d602017-08-23 16:55:35 -070019896 uint32_t ret = 0;
James Smart895427b2017-02-12 13:52:30 -080019897
19898 /* NVME_LS and NVME_LS ABTS requests. */
19899 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19900 pring = phba->sli4_hba.nvmels_wq->pring;
James Smart6a828b02019-01-28 11:14:31 -080019901 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19902 qp, wq_access);
James Smart895427b2017-02-12 13:52:30 -080019903 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19904 if (!sglq) {
19905 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19906 return WQE_BUSY;
19907 }
19908 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19909 pwqe->sli4_xritag = sglq->sli4_xritag;
19910 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19911 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19912 return WQE_ERROR;
19913 }
19914 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19915 pwqe->sli4_xritag);
Dick Kennedycd22d602017-08-23 16:55:35 -070019916 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19917 if (ret) {
James Smart895427b2017-02-12 13:52:30 -080019918 spin_unlock_irqrestore(&pring->ring_lock, iflags);
Dick Kennedycd22d602017-08-23 16:55:35 -070019919 return ret;
James Smart895427b2017-02-12 13:52:30 -080019920 }
Dick Kennedycd22d602017-08-23 16:55:35 -070019921
James Smart895427b2017-02-12 13:52:30 -080019922 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19923 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart93a4d6f2019-11-04 16:57:05 -080019924
19925 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
James Smart895427b2017-02-12 13:52:30 -080019926 return 0;
19927 }
19928
19929 /* NVME_FCREQ and NVME_ABTS requests */
19930 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19931 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
James Smartc00f62e2019-08-14 16:57:11 -070019932 wq = qp->io_wq;
James Smart1fbf9742019-01-28 11:14:26 -080019933 pring = wq->pring;
James Smart895427b2017-02-12 13:52:30 -080019934
James Smartc00f62e2019-08-14 16:57:11 -070019935 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
James Smart895427b2017-02-12 13:52:30 -080019936
James Smart6a828b02019-01-28 11:14:31 -080019937 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19938 qp, wq_access);
Dick Kennedycd22d602017-08-23 16:55:35 -070019939 ret = lpfc_sli4_wq_put(wq, wqe);
19940 if (ret) {
James Smart895427b2017-02-12 13:52:30 -080019941 spin_unlock_irqrestore(&pring->ring_lock, iflags);
Dick Kennedycd22d602017-08-23 16:55:35 -070019942 return ret;
James Smart895427b2017-02-12 13:52:30 -080019943 }
19944 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19945 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart93a4d6f2019-11-04 16:57:05 -080019946
19947 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
James Smart895427b2017-02-12 13:52:30 -080019948 return 0;
19949 }
19950
James Smartf358dd02017-02-12 13:52:34 -080019951 /* NVMET requests */
19952 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19953 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
James Smartc00f62e2019-08-14 16:57:11 -070019954 wq = qp->io_wq;
James Smart1fbf9742019-01-28 11:14:26 -080019955 pring = wq->pring;
James Smartf358dd02017-02-12 13:52:34 -080019956
James Smartf358dd02017-02-12 13:52:34 -080019957 ctxp = pwqe->context2;
James Smart6c621a22017-05-15 15:20:45 -070019958 sglq = ctxp->ctxbuf->sglq;
James Smartf358dd02017-02-12 13:52:34 -080019959 if (pwqe->sli4_xritag == NO_XRI) {
19960 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19961 pwqe->sli4_xritag = sglq->sli4_xritag;
19962 }
19963 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19964 pwqe->sli4_xritag);
James Smartc00f62e2019-08-14 16:57:11 -070019965 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
James Smart1fbf9742019-01-28 11:14:26 -080019966
James Smart6a828b02019-01-28 11:14:31 -080019967 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19968 qp, wq_access);
Dick Kennedycd22d602017-08-23 16:55:35 -070019969 ret = lpfc_sli4_wq_put(wq, wqe);
19970 if (ret) {
James Smartf358dd02017-02-12 13:52:34 -080019971 spin_unlock_irqrestore(&pring->ring_lock, iflags);
Dick Kennedycd22d602017-08-23 16:55:35 -070019972 return ret;
James Smartf358dd02017-02-12 13:52:34 -080019973 }
19974 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19975 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart93a4d6f2019-11-04 16:57:05 -080019976
19977 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
James Smartf358dd02017-02-12 13:52:34 -080019978 return 0;
19979 }
James Smart895427b2017-02-12 13:52:30 -080019980 return WQE_ERROR;
19981}
James Smartc4908502019-01-28 11:14:28 -080019982
19983#ifdef LPFC_MXP_STAT
19984/**
19985 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
19986 * @phba: pointer to lpfc hba data structure.
19987 * @hwqid: belong to which HWQ.
19988 *
19989 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
19990 * 15 seconds after a test case is running.
19991 *
19992 * The user should call lpfc_debugfs_multixripools_write before running a test
19993 * case to clear stat_snapshot_taken. Then the user starts a test case. During
19994 * test case is running, stat_snapshot_taken is incremented by 1 every time when
19995 * this routine is called from heartbeat timer. When stat_snapshot_taken is
19996 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
19997 **/
19998void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19999{
20000 struct lpfc_sli4_hdw_queue *qp;
20001 struct lpfc_multixri_pool *multixri_pool;
20002 struct lpfc_pvt_pool *pvt_pool;
20003 struct lpfc_pbl_pool *pbl_pool;
20004 u32 txcmplq_cnt;
20005
20006 qp = &phba->sli4_hba.hdwq[hwqid];
20007 multixri_pool = qp->p_multixri_pool;
20008 if (!multixri_pool)
20009 return;
20010
20011 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20012 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20013 pbl_pool = &qp->p_multixri_pool->pbl_pool;
James Smartc00f62e2019-08-14 16:57:11 -070020014 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
James Smartc4908502019-01-28 11:14:28 -080020015
20016 multixri_pool->stat_pbl_count = pbl_pool->count;
20017 multixri_pool->stat_pvt_count = pvt_pool->count;
20018 multixri_pool->stat_busy_count = txcmplq_cnt;
20019 }
20020
20021 multixri_pool->stat_snapshot_taken++;
20022}
20023#endif
20024
20025/**
20026 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20027 * @phba: pointer to lpfc hba data structure.
20028 * @hwqid: belong to which HWQ.
20029 *
20030 * This routine moves some XRIs from private to public pool when private pool
20031 * is not busy.
20032 **/
20033void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20034{
20035 struct lpfc_multixri_pool *multixri_pool;
20036 u32 io_req_count;
20037 u32 prev_io_req_count;
20038
20039 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20040 if (!multixri_pool)
20041 return;
20042 io_req_count = multixri_pool->io_req_count;
20043 prev_io_req_count = multixri_pool->prev_io_req_count;
20044
20045 if (prev_io_req_count != io_req_count) {
20046 /* Private pool is busy */
20047 multixri_pool->prev_io_req_count = io_req_count;
20048 } else {
20049 /* Private pool is not busy.
20050 * Move XRIs from private to public pool.
20051 */
20052 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20053 }
20054}
20055
20056/**
20057 * lpfc_adjust_high_watermark - Adjust high watermark
20058 * @phba: pointer to lpfc hba data structure.
20059 * @hwqid: belong to which HWQ.
20060 *
20061 * This routine sets high watermark as number of outstanding XRIs,
20062 * but make sure the new value is between xri_limit/2 and xri_limit.
20063 **/
20064void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20065{
20066 u32 new_watermark;
20067 u32 watermark_max;
20068 u32 watermark_min;
20069 u32 xri_limit;
20070 u32 txcmplq_cnt;
20071 u32 abts_io_bufs;
20072 struct lpfc_multixri_pool *multixri_pool;
20073 struct lpfc_sli4_hdw_queue *qp;
20074
20075 qp = &phba->sli4_hba.hdwq[hwqid];
20076 multixri_pool = qp->p_multixri_pool;
20077 if (!multixri_pool)
20078 return;
20079 xri_limit = multixri_pool->xri_limit;
20080
20081 watermark_max = xri_limit;
20082 watermark_min = xri_limit / 2;
20083
James Smartc00f62e2019-08-14 16:57:11 -070020084 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
James Smartc4908502019-01-28 11:14:28 -080020085 abts_io_bufs = qp->abts_scsi_io_bufs;
James Smartc00f62e2019-08-14 16:57:11 -070020086 abts_io_bufs += qp->abts_nvme_io_bufs;
James Smartc4908502019-01-28 11:14:28 -080020087
20088 new_watermark = txcmplq_cnt + abts_io_bufs;
20089 new_watermark = min(watermark_max, new_watermark);
20090 new_watermark = max(watermark_min, new_watermark);
20091 multixri_pool->pvt_pool.high_watermark = new_watermark;
20092
20093#ifdef LPFC_MXP_STAT
20094 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20095 new_watermark);
20096#endif
20097}
20098
/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called from heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
 * The first step moves (all - low_watermark) amount of XRIs.
 * The second step moves the rest of XRIs.
 *
 * Lock order: pbl_pool->lock (irqsave) is taken before pvt_pool->lock.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
	pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;

	/* Hold both pool locks for the whole transfer */
	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move (all - low_watermark) from pvt_pool
		 * to pbl_pool
		 */

		/* Move low watermark of bufs from pvt_pool to tmp_list */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}

		/* Move all bufs from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);

		/* Move all bufs from tmp_list to pvt_pool, so the private
		 * pool is left holding exactly low_watermark bufs
		 */
		list_splice(&tmp_list, &pvt_pool->list);

		pbl_pool->count += (pvt_pool->count - tmp_count);
		pvt_pool->count = tmp_count;
	} else {
		/* Step 2: move the rest from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);
		pbl_pool->count += pvt_pool->count;
		pvt_pool->count = 0;
	}

	spin_unlock(&pvt_pool->lock);
	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}
20161
/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: belong to which HWQ (passed to the lock-stat instrumentation)
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified pbl_pool
 * to the specified pvt_pool. It might move less than count XRIs if there's not
 * enough in public pool.
 *
 * Lock order: pbl_pool->lock (trylock, irqsave) before pvt_pool->lock.
 *
 * Return:
 *   true - if XRIs are successfully moved from the specified pbl_pool to the
 *          specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;

	/* trylock: rather than spin on a contended public pool, fail and
	 * let the caller try the next HWQ's pool.
	 */
	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}
20215
/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of public pools with
 * Round Robin method. The search always starts from local hwqid, then the next
 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
 * a batch of free common bufs are moved to private pool on hwqid.
 * It might move less than count XRIs if there's not enough in public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available; may fail
		 * either because the pool is empty or its lock is contended
		 */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty*/
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
20290
20291/**
20292 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20293 * @phba: pointer to lpfc hba data structure.
20294 * @qp: belong to which HWQ.
20295 *
20296 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than
20297 * low watermark.
20298 **/
20299void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20300{
20301 struct lpfc_multixri_pool *multixri_pool;
20302 struct lpfc_pvt_pool *pvt_pool;
20303
20304 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20305 pvt_pool = &multixri_pool->pvt_pool;
20306
20307 if (pvt_pool->count < pvt_pool->low_watermark)
20308 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20309}
20310
/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to free pool. If this is an urgent IO,
 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	/* Return any extra SGL / CMD-RSP chunks borrowed from the hdwq
	 * pools before the buf itself is freed.
	 */
	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;

		/* XRIs this HWQ owns: free in pvt_pool + in flight on the
		 * txcmplq + aborted SCSI/NVME bufs
		 */
		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 * based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		/* No rebalancing: simply hand the buf to the put-list */
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
20412
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: belong to which HWQ (passed to the lock-stat instrumentation).
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		/* Skip bufs whose XRI is still held by an active RRQ for
		 * this node (SCSI side; ndlp is NULL on NVME side)
		 */
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}
20450
20451/**
20452 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20453 * @phba: pointer to lpfc hba data structure.
20454 *
20455 * This routine tries to get one free IO buf from expedite pool.
20456 *
20457 * Return:
20458 * pointer to one free IO buf - if expedite pool is not empty
20459 * NULL - if expedite pool is empty
20460 **/
20461static struct lpfc_io_buf *
20462lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20463{
20464 struct lpfc_io_buf *lpfc_ncmd;
20465 struct lpfc_io_buf *lpfc_ncmd_next;
20466 unsigned long iflag;
20467 struct lpfc_epd_pool *epd_pool;
20468
20469 epd_pool = &phba->epd_pool;
20470 lpfc_ncmd = NULL;
20471
20472 spin_lock_irqsave(&epd_pool->lock, iflag);
20473 if (epd_pool->count > 0) {
20474 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20475 &epd_pool->list, list) {
20476 list_del(&lpfc_ncmd->list);
20477 epd_pool->count--;
20478 break;
20479 }
20480 }
20481 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20482
20483 return lpfc_ncmd;
20484}
20485
/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	/* io_req_count feeds the busy/idle check in
	 * lpfc_adjust_pvt_pool_count()
	 */
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		/* Record owning HWQ so the release path can find its pools */
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}
20544
/* Pop the first eligible IO buf off hdwq @idx's get-list, skipping bufs
 * whose XRI is still busy in an RRQ for @ndlp and bufs flagged
 * LPFC_SBUF_NOT_POSTED. Caller holds qp->io_buf_list_get_lock (see
 * lpfc_get_io_buf). Returns NULL when no eligible buf is found.
 */
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		/* Record owning HWQ for the release path */
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}
20569
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes
 * a IO buffer from head of @hdwq io_buf_list and returns to caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		/* The last LPFC_NVME_EXPEDITE_XRICNT bufs are reserved:
		 * only expedite requests may dip below that level.
		 */
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			/* Get-list exhausted: swap the whole put-list into
			 * the get-list, then retry once.
			 */
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
James Smartd79c9e92019-08-14 16:57:09 -070020625
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more; the lock is dropped across the GFP_ATOMIC
		 * allocations and re-taken to attach the new chunk
		 */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	/* The chunk just moved/added is the tail of the buf's xtra list */
	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
20694
20695/**
20696 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
20697 * @phba: The HBA for which this call is being executed.
20698 * @lpfc_buf: IO buf structure with the SGL chunk
20699 *
20700 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
20701 *
20702 * Return codes:
20703 * 0 - Success
20704 * -EINVAL - Error
20705 **/
20706int
20707lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20708{
20709 int rc = 0;
20710 struct sli4_hybrid_sgl *list_entry = NULL;
20711 struct sli4_hybrid_sgl *tmp = NULL;
20712 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20713 struct list_head *buf_list = &hdwq->sgl_list;
James Smarta4c21ac2019-09-21 20:59:01 -070020714 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070020715
James Smarta4c21ac2019-09-21 20:59:01 -070020716 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020717
20718 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20719 list_for_each_entry_safe(list_entry, tmp,
20720 &lpfc_buf->dma_sgl_xtra_list,
20721 list_node) {
20722 list_move_tail(&list_entry->list_node,
20723 buf_list);
20724 }
20725 } else {
20726 rc = -EINVAL;
20727 }
20728
James Smarta4c21ac2019-09-21 20:59:01 -070020729 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020730 return rc;
20731}
20732
20733/**
20734 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
20735 * @phba: phba object
20736 * @hdwq: hdwq to cleanup sgl buff resources on
20737 *
20738 * This routine frees all SGL chunks of hdwq SGL chunk pool.
20739 *
20740 * Return codes:
20741 * None
20742 **/
20743void
20744lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20745 struct lpfc_sli4_hdw_queue *hdwq)
20746{
20747 struct list_head *buf_list = &hdwq->sgl_list;
20748 struct sli4_hybrid_sgl *list_entry = NULL;
20749 struct sli4_hybrid_sgl *tmp = NULL;
James Smarta4c21ac2019-09-21 20:59:01 -070020750 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070020751
James Smarta4c21ac2019-09-21 20:59:01 -070020752 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020753
20754 /* Free sgl pool */
20755 list_for_each_entry_safe(list_entry, tmp,
20756 buf_list, list_node) {
20757 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20758 list_entry->dma_sgl,
20759 list_entry->dma_phys_sgl);
20760 list_del(&list_entry->list_node);
20761 kfree(list_entry);
20762 }
20763
James Smarta4c21ac2019-09-21 20:59:01 -070020764 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020765}
20766
/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate an CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more; the lock is dropped across the GFP_ATOMIC
		 * allocations and re-taken to attach the new buffer
		 */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		/* The RSP area immediately follows the CMD area inside the
		 * same DMA buffer
		 */
		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	/* The buffer just moved/added is the tail of the buf's list */
	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}
20842
20843/**
20844 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
20845 * @phba: The HBA for which this call is being executed.
20846 * @lpfc_buf: IO buf structure with the CMD/RSP buf
20847 *
20848 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
20849 *
20850 * Return codes:
20851 * 0 - Success
20852 * -EINVAL - Error
20853 **/
20854int
20855lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20856 struct lpfc_io_buf *lpfc_buf)
20857{
20858 int rc = 0;
20859 struct fcp_cmd_rsp_buf *list_entry = NULL;
20860 struct fcp_cmd_rsp_buf *tmp = NULL;
20861 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20862 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
James Smarta4c21ac2019-09-21 20:59:01 -070020863 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070020864
James Smarta4c21ac2019-09-21 20:59:01 -070020865 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020866
20867 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20868 list_for_each_entry_safe(list_entry, tmp,
20869 &lpfc_buf->dma_cmd_rsp_list,
20870 list_node) {
20871 list_move_tail(&list_entry->list_node,
20872 buf_list);
20873 }
20874 } else {
20875 rc = -EINVAL;
20876 }
20877
James Smarta4c21ac2019-09-21 20:59:01 -070020878 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020879 return rc;
20880}
20881
20882/**
20883 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
20884 * @phba: phba object
20885 * @hdwq: hdwq to cleanup cmd rsp buff resources on
20886 *
20887 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
20888 *
20889 * Return codes:
20890 * None
20891 **/
20892void
20893lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20894 struct lpfc_sli4_hdw_queue *hdwq)
20895{
20896 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20897 struct fcp_cmd_rsp_buf *list_entry = NULL;
20898 struct fcp_cmd_rsp_buf *tmp = NULL;
James Smarta4c21ac2019-09-21 20:59:01 -070020899 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070020900
James Smarta4c21ac2019-09-21 20:59:01 -070020901 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020902
20903 /* Free cmd_rsp buf pool */
20904 list_for_each_entry_safe(list_entry, tmp,
20905 buf_list,
20906 list_node) {
20907 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
20908 list_entry->fcp_cmnd,
20909 list_entry->fcp_cmd_rsp_dma_handle);
20910 list_del(&list_entry->list_node);
20911 kfree(list_entry);
20912 }
20913
James Smarta4c21ac2019-09-21 20:59:01 -070020914 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070020915}