blob: e844d9a35b4c88a42bb0f8011641719ed0eb8010 [file] [log] [blame]
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
23
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
66
James Smart4f774512009-05-22 14:52:35 -040067
68/* Provide function prototypes local to this module. */
69static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
70 uint32_t);
71static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
James Smart45ed1192009-10-02 15:17:02 -040072 uint8_t *, uint32_t *);
73static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 struct lpfc_iocbq *);
James Smart6669f9b2009-10-02 15:16:45 -040075static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 struct hbq_dmabuf *);
James Smartae9e28f2017-05-15 15:20:51 -070077static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 struct hbq_dmabuf *dmabuf);
James Smart32517fc2019-01-28 11:14:33 -080079static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
80 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
James Smart895427b2017-02-12 13:52:30 -080081static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
James Smart8a9d2e82012-05-09 21:16:12 -040082 int);
Dick Kennedyf485c182017-09-29 17:34:34 -070083static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
James Smart32517fc2019-01-28 11:14:33 -080084 struct lpfc_queue *eq,
85 struct lpfc_eqe *eqe);
James Smarte8d3c3b2013-10-10 12:21:30 -040086static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
87static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
James Smart24c7c0a2019-09-21 20:58:58 -070088static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
89static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
90 struct lpfc_queue *cq,
91 struct lpfc_cqe *cqe);
James Smart05580562011-05-24 11:40:48 -040092
James Smart840a4702020-11-15 11:26:40 -080093union lpfc_wqe128 lpfc_iread_cmd_template;
94union lpfc_wqe128 lpfc_iwrite_cmd_template;
95union lpfc_wqe128 lpfc_icmnd_cmd_template;
96
James Smart4f774512009-05-22 14:52:35 -040097static IOCB_t *
98lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
99{
100 return &iocbq->iocb;
101}
102
James Smart840a4702020-11-15 11:26:40 -0800103/* Setup WQE templates for IOs */
104void lpfc_wqe_cmd_template(void)
105{
106 union lpfc_wqe128 *wqe;
107
108 /* IREAD template */
109 wqe = &lpfc_iread_cmd_template;
110 memset(wqe, 0, sizeof(union lpfc_wqe128));
111
112 /* Word 0, 1, 2 - BDE is variable */
113
114 /* Word 3 - cmd_buff_len, payload_offset_len is zero */
115
116 /* Word 4 - total_xfer_len is variable */
117
118 /* Word 5 - is zero */
119
120 /* Word 6 - ctxt_tag, xri_tag is variable */
121
122 /* Word 7 */
123 bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
124 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
125 bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
126 bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
127
128 /* Word 8 - abort_tag is variable */
129
130 /* Word 9 - reqtag is variable */
131
132 /* Word 10 - dbde, wqes is variable */
133 bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
134 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
135 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
136 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
137 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
138
139 /* Word 11 - pbde is variable */
140 bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
141 bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
142 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
143
144 /* Word 12 - is zero */
145
146 /* Word 13, 14, 15 - PBDE is variable */
147
148 /* IWRITE template */
149 wqe = &lpfc_iwrite_cmd_template;
150 memset(wqe, 0, sizeof(union lpfc_wqe128));
151
152 /* Word 0, 1, 2 - BDE is variable */
153
154 /* Word 3 - cmd_buff_len, payload_offset_len is zero */
155
156 /* Word 4 - total_xfer_len is variable */
157
158 /* Word 5 - initial_xfer_len is variable */
159
160 /* Word 6 - ctxt_tag, xri_tag is variable */
161
162 /* Word 7 */
163 bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
164 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
165 bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
166 bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
167
168 /* Word 8 - abort_tag is variable */
169
170 /* Word 9 - reqtag is variable */
171
172 /* Word 10 - dbde, wqes is variable */
173 bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
174 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
175 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
176 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
177 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
178
179 /* Word 11 - pbde is variable */
180 bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
181 bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
182 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
183
184 /* Word 12 - is zero */
185
186 /* Word 13, 14, 15 - PBDE is variable */
187
188 /* ICMND template */
189 wqe = &lpfc_icmnd_cmd_template;
190 memset(wqe, 0, sizeof(union lpfc_wqe128));
191
192 /* Word 0, 1, 2 - BDE is variable */
193
194 /* Word 3 - payload_offset_len is variable */
195
196 /* Word 4, 5 - is zero */
197
198 /* Word 6 - ctxt_tag, xri_tag is variable */
199
200 /* Word 7 */
201 bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
202 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
203 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
204 bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
205
206 /* Word 8 - abort_tag is variable */
207
208 /* Word 9 - reqtag is variable */
209
210 /* Word 10 - dbde, wqes is variable */
211 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
212 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
213 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
214 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
215 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
216
217 /* Word 11 */
218 bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
219 bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
220 bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
221
222 /* Word 12, 13, 14, 15 - is zero */
223}
224
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
252
James Smart4f774512009-05-22 14:52:35 -0400253/**
254 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
255 * @q: The Work Queue to operate on.
256 * @wqe: The work Queue Entry to put on the Work queue.
257 *
258 * This routine will copy the contents of @wqe to the next available entry on
259 * the @q. This function will then ring the Work Queue Doorbell to signal the
260 * HBA to start processing the Work Queue Entry. This function returns 0 if
261 * successful. If no entries are available on @q then this function will return
262 * -ENOMEM.
263 * The caller is expected to hold the hbalock when calling this routine.
264 **/
Dick Kennedycd22d602017-08-23 16:55:35 -0700265static int
James Smart205e8242018-03-05 12:04:03 -0800266lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
James Smart4f774512009-05-22 14:52:35 -0400267{
James Smart2e90f4b2011-12-13 13:22:37 -0500268 union lpfc_wqe *temp_wqe;
James Smart4f774512009-05-22 14:52:35 -0400269 struct lpfc_register doorbell;
270 uint32_t host_index;
James Smart027140e2012-08-03 12:35:44 -0400271 uint32_t idx;
James Smart1351e692018-02-22 08:18:43 -0800272 uint32_t i = 0;
273 uint8_t *tmp;
James Smart5cc167d2018-06-26 08:24:23 -0700274 u32 if_type;
James Smart4f774512009-05-22 14:52:35 -0400275
James Smart2e90f4b2011-12-13 13:22:37 -0500276 /* sanity check on queue memory */
277 if (unlikely(!q))
278 return -ENOMEM;
James Smart47ff4c52020-11-15 11:26:41 -0800279
James Smart9afbee32019-03-12 16:30:28 -0700280 temp_wqe = lpfc_sli4_qe(q, q->host_index);
James Smart2e90f4b2011-12-13 13:22:37 -0500281
James Smart4f774512009-05-22 14:52:35 -0400282 /* If the host has not yet processed the next entry then we are done */
James Smart027140e2012-08-03 12:35:44 -0400283 idx = ((q->host_index + 1) % q->entry_count);
284 if (idx == q->hba_index) {
James Smartb84daac2012-08-03 12:35:13 -0400285 q->WQ_overflow++;
Dick Kennedycd22d602017-08-23 16:55:35 -0700286 return -EBUSY;
James Smartb84daac2012-08-03 12:35:13 -0400287 }
288 q->WQ_posted++;
James Smart4f774512009-05-22 14:52:35 -0400289 /* set consumption flag every once in a while */
James Smart32517fc2019-01-28 11:14:33 -0800290 if (!((q->host_index + 1) % q->notify_interval))
James Smartf0d9bcc2010-10-22 11:07:09 -0400291 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
James Smart04673e32018-01-30 15:58:45 -0800292 else
293 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
James Smartfedd3b72011-02-16 12:39:24 -0500294 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
295 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
James Smart48f8fdb2018-05-04 20:37:51 -0700296 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
James Smart1351e692018-02-22 08:18:43 -0800297 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
298 /* write to DPP aperture taking advatage of Combined Writes */
James Smart4c066192018-03-05 10:29:03 -0800299 tmp = (uint8_t *)temp_wqe;
300#ifdef __raw_writeq
James Smart1351e692018-02-22 08:18:43 -0800301 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
James Smart4c066192018-03-05 10:29:03 -0800302 __raw_writeq(*((uint64_t *)(tmp + i)),
303 q->dpp_regaddr + i);
304#else
305 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
306 __raw_writel(*((uint32_t *)(tmp + i)),
307 q->dpp_regaddr + i);
308#endif
James Smart1351e692018-02-22 08:18:43 -0800309 }
310 /* ensure WQE bcopy and DPP flushed before doorbell write */
James Smart6b3b3bd2016-12-19 15:07:30 -0800311 wmb();
James Smart4f774512009-05-22 14:52:35 -0400312
313 /* Update the host index before invoking device */
314 host_index = q->host_index;
James Smart027140e2012-08-03 12:35:44 -0400315
316 q->host_index = idx;
James Smart4f774512009-05-22 14:52:35 -0400317
318 /* Ring Doorbell */
319 doorbell.word0 = 0;
James Smart962bc512013-01-03 15:44:00 -0500320 if (q->db_format == LPFC_DB_LIST_FORMAT) {
James Smart1351e692018-02-22 08:18:43 -0800321 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
322 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
323 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
324 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
325 q->dpp_id);
326 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
327 q->queue_id);
328 } else {
329 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
James Smart1351e692018-02-22 08:18:43 -0800330 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
James Smart5cc167d2018-06-26 08:24:23 -0700331
332 /* Leave bits <23:16> clear for if_type 6 dpp */
333 if_type = bf_get(lpfc_sli_intf_if_type,
334 &q->phba->sli4_hba.sli_intf);
335 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
336 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
337 host_index);
James Smart1351e692018-02-22 08:18:43 -0800338 }
James Smart962bc512013-01-03 15:44:00 -0500339 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
340 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
341 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
342 } else {
343 return -EINVAL;
344 }
345 writel(doorbell.word0, q->db_regaddr);
James Smart4f774512009-05-22 14:52:35 -0400346
347 return 0;
348}
349
350/**
351 * lpfc_sli4_wq_release - Updates internal hba index for WQ
352 * @q: The Work Queue to operate on.
353 * @index: The index to advance the hba index to.
354 *
355 * This routine will update the HBA index of a queue to reflect consumption of
356 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
357 * an entry the host calls this function to update the queue's internal
James Smart1543af32020-03-22 11:12:58 -0700358 * pointers.
James Smart4f774512009-05-22 14:52:35 -0400359 **/
James Smart1543af32020-03-22 11:12:58 -0700360static void
James Smart4f774512009-05-22 14:52:35 -0400361lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
362{
James Smart2e90f4b2011-12-13 13:22:37 -0500363 /* sanity check on queue memory */
364 if (unlikely(!q))
James Smart1543af32020-03-22 11:12:58 -0700365 return;
James Smart2e90f4b2011-12-13 13:22:37 -0500366
James Smart1543af32020-03-22 11:12:58 -0700367 q->hba_index = index;
James Smart4f774512009-05-22 14:52:35 -0400368}
369
370/**
371 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
372 * @q: The Mailbox Queue to operate on.
Lee Jones7af29d42020-07-21 17:41:31 +0100373 * @mqe: The Mailbox Queue Entry to put on the Work queue.
James Smart4f774512009-05-22 14:52:35 -0400374 *
375 * This routine will copy the contents of @mqe to the next available entry on
376 * the @q. This function will then ring the Work Queue Doorbell to signal the
377 * HBA to start processing the Work Queue Entry. This function returns 0 if
378 * successful. If no entries are available on @q then this function will return
379 * -ENOMEM.
380 * The caller is expected to hold the hbalock when calling this routine.
381 **/
382static uint32_t
383lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
384{
James Smart2e90f4b2011-12-13 13:22:37 -0500385 struct lpfc_mqe *temp_mqe;
James Smart4f774512009-05-22 14:52:35 -0400386 struct lpfc_register doorbell;
James Smart4f774512009-05-22 14:52:35 -0400387
James Smart2e90f4b2011-12-13 13:22:37 -0500388 /* sanity check on queue memory */
389 if (unlikely(!q))
390 return -ENOMEM;
James Smart9afbee32019-03-12 16:30:28 -0700391 temp_mqe = lpfc_sli4_qe(q, q->host_index);
James Smart2e90f4b2011-12-13 13:22:37 -0500392
James Smart4f774512009-05-22 14:52:35 -0400393 /* If the host has not yet processed the next entry then we are done */
394 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
395 return -ENOMEM;
James Smart48f8fdb2018-05-04 20:37:51 -0700396 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
James Smart4f774512009-05-22 14:52:35 -0400397 /* Save off the mailbox pointer for completion */
398 q->phba->mbox = (MAILBOX_t *)temp_mqe;
399
400 /* Update the host index before invoking device */
James Smart4f774512009-05-22 14:52:35 -0400401 q->host_index = ((q->host_index + 1) % q->entry_count);
402
403 /* Ring Doorbell */
404 doorbell.word0 = 0;
405 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
406 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
407 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
James Smart4f774512009-05-22 14:52:35 -0400408 return 0;
409}
410
411/**
412 * lpfc_sli4_mq_release - Updates internal hba index for MQ
413 * @q: The Mailbox Queue to operate on.
414 *
415 * This routine will update the HBA index of a queue to reflect consumption of
416 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
417 * an entry the host calls this function to update the queue's internal
418 * pointers. This routine returns the number of entries that were consumed by
419 * the HBA.
420 **/
421static uint32_t
422lpfc_sli4_mq_release(struct lpfc_queue *q)
423{
James Smart2e90f4b2011-12-13 13:22:37 -0500424 /* sanity check on queue memory */
425 if (unlikely(!q))
426 return 0;
427
James Smart4f774512009-05-22 14:52:35 -0400428 /* Clear the mailbox pointer for completion */
429 q->phba->mbox = NULL;
430 q->hba_index = ((q->hba_index + 1) % q->entry_count);
431 return 1;
432}
433
434/**
435 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
436 * @q: The Event Queue to get the first valid EQE from
437 *
438 * This routine will get the first valid Event Queue Entry from @q, update
439 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
440 * the Queue (no more work to do), or the Queue is full of EQEs that have been
441 * processed, but not popped back to the HBA then this routine will return NULL.
442 **/
443static struct lpfc_eqe *
444lpfc_sli4_eq_get(struct lpfc_queue *q)
445{
James Smart2e90f4b2011-12-13 13:22:37 -0500446 struct lpfc_eqe *eqe;
447
448 /* sanity check on queue memory */
449 if (unlikely(!q))
450 return NULL;
James Smart9afbee32019-03-12 16:30:28 -0700451 eqe = lpfc_sli4_qe(q, q->host_index);
James Smart4f774512009-05-22 14:52:35 -0400452
453 /* If the next EQE is not valid then we are done */
James Smart7365f6f2018-02-22 08:18:46 -0800454 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
James Smart4f774512009-05-22 14:52:35 -0400455 return NULL;
James Smart27f344e2014-05-07 17:16:46 -0400456
457 /*
458 * insert barrier for instruction interlock : data from the hardware
459 * must have the valid bit checked before it can be copied and acted
James Smart2ea259e2017-02-12 13:52:27 -0800460 * upon. Speculative instructions were allowing a bcopy at the start
461 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
462 * after our return, to copy data before the valid bit check above
463 * was done. As such, some of the copied data was stale. The barrier
464 * ensures the check is before any data is copied.
James Smart27f344e2014-05-07 17:16:46 -0400465 */
466 mb();
James Smart4f774512009-05-22 14:52:35 -0400467 return eqe;
468}
469
470/**
James Smartba20c852012-08-03 12:36:52 -0400471 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
472 * @q: The Event Queue to disable interrupts
473 *
474 **/
James Smart92f3b322019-03-20 10:44:22 -0700475void
James Smartba20c852012-08-03 12:36:52 -0400476lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
477{
478 struct lpfc_register doorbell;
479
480 doorbell.word0 = 0;
481 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
482 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
483 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
484 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
485 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
James Smart9dd35422018-02-22 08:18:41 -0800486 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
James Smartba20c852012-08-03 12:36:52 -0400487}
488
489/**
James Smart27d6ac02018-02-22 08:18:42 -0800490 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
491 * @q: The Event Queue to disable interrupts
492 *
493 **/
James Smart92f3b322019-03-20 10:44:22 -0700494void
James Smart27d6ac02018-02-22 08:18:42 -0800495lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
496{
497 struct lpfc_register doorbell;
498
499 doorbell.word0 = 0;
James Smartaad59d52018-09-10 10:30:47 -0700500 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
James Smart27d6ac02018-02-22 08:18:42 -0800501 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
502}
503
504/**
James Smart32517fc2019-01-28 11:14:33 -0800505 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
506 * @phba: adapter with EQ
James Smart4f774512009-05-22 14:52:35 -0400507 * @q: The Event Queue that the host has completed processing for.
James Smart32517fc2019-01-28 11:14:33 -0800508 * @count: Number of elements that have been consumed
James Smart4f774512009-05-22 14:52:35 -0400509 * @arm: Indicates whether the host wants to arms this CQ.
510 *
James Smart32517fc2019-01-28 11:14:33 -0800511 * This routine will notify the HBA, by ringing the doorbell, that count
512 * number of EQEs have been processed. The @arm parameter indicates whether
513 * the queue should be rearmed when ringing the doorbell.
James Smart4f774512009-05-22 14:52:35 -0400514 **/
James Smart32517fc2019-01-28 11:14:33 -0800515void
516lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
517 uint32_t count, bool arm)
James Smart4f774512009-05-22 14:52:35 -0400518{
James Smart4f774512009-05-22 14:52:35 -0400519 struct lpfc_register doorbell;
520
James Smart2e90f4b2011-12-13 13:22:37 -0500521 /* sanity check on queue memory */
James Smart32517fc2019-01-28 11:14:33 -0800522 if (unlikely(!q || (count == 0 && !arm)))
523 return;
James Smart4f774512009-05-22 14:52:35 -0400524
525 /* ring doorbell for number popped */
526 doorbell.word0 = 0;
527 if (arm) {
528 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
529 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
530 }
James Smart32517fc2019-01-28 11:14:33 -0800531 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
James Smart4f774512009-05-22 14:52:35 -0400532 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
James Smart6b5151f2012-01-18 16:24:06 -0500533 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
534 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
535 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
James Smart9dd35422018-02-22 08:18:41 -0800536 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
James Smarta747c9c2009-11-18 15:41:10 -0500537 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
538 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
James Smart9dd35422018-02-22 08:18:41 -0800539 readl(q->phba->sli4_hba.EQDBregaddr);
James Smart4f774512009-05-22 14:52:35 -0400540}
541
542/**
James Smart32517fc2019-01-28 11:14:33 -0800543 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
544 * @phba: adapter with EQ
James Smart27d6ac02018-02-22 08:18:42 -0800545 * @q: The Event Queue that the host has completed processing for.
James Smart32517fc2019-01-28 11:14:33 -0800546 * @count: Number of elements that have been consumed
James Smart27d6ac02018-02-22 08:18:42 -0800547 * @arm: Indicates whether the host wants to arms this CQ.
548 *
James Smart32517fc2019-01-28 11:14:33 -0800549 * This routine will notify the HBA, by ringing the doorbell, that count
550 * number of EQEs have been processed. The @arm parameter indicates whether
551 * the queue should be rearmed when ringing the doorbell.
James Smart27d6ac02018-02-22 08:18:42 -0800552 **/
James Smart32517fc2019-01-28 11:14:33 -0800553void
554lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
555 uint32_t count, bool arm)
James Smart27d6ac02018-02-22 08:18:42 -0800556{
James Smart27d6ac02018-02-22 08:18:42 -0800557 struct lpfc_register doorbell;
558
559 /* sanity check on queue memory */
James Smart32517fc2019-01-28 11:14:33 -0800560 if (unlikely(!q || (count == 0 && !arm)))
561 return;
James Smart27d6ac02018-02-22 08:18:42 -0800562
563 /* ring doorbell for number popped */
564 doorbell.word0 = 0;
565 if (arm)
566 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
James Smart32517fc2019-01-28 11:14:33 -0800567 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
James Smart27d6ac02018-02-22 08:18:42 -0800568 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
569 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
570 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
571 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
572 readl(q->phba->sli4_hba.EQDBregaddr);
James Smart32517fc2019-01-28 11:14:33 -0800573}
574
575static void
576__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
577 struct lpfc_eqe *eqe)
578{
579 if (!phba->sli4_hba.pc_sli4_params.eqav)
580 bf_set_le32(lpfc_eqe_valid, eqe, 0);
581
582 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
583
584 /* if the index wrapped around, toggle the valid bit */
585 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
586 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
587}
588
589static void
James Smart24c7c0a2019-09-21 20:58:58 -0700590lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
James Smart32517fc2019-01-28 11:14:33 -0800591{
James Smart24c7c0a2019-09-21 20:58:58 -0700592 struct lpfc_eqe *eqe = NULL;
593 u32 eq_count = 0, cq_count = 0;
594 struct lpfc_cqe *cqe = NULL;
595 struct lpfc_queue *cq = NULL, *childq = NULL;
596 int cqid = 0;
James Smart32517fc2019-01-28 11:14:33 -0800597
598 /* walk all the EQ entries and drop on the floor */
599 eqe = lpfc_sli4_eq_get(eq);
600 while (eqe) {
James Smart24c7c0a2019-09-21 20:58:58 -0700601 /* Get the reference to the corresponding CQ */
602 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
603 cq = NULL;
604
605 list_for_each_entry(childq, &eq->child_list, list) {
606 if (childq->queue_id == cqid) {
607 cq = childq;
608 break;
609 }
610 }
611 /* If CQ is valid, iterate through it and drop all the CQEs */
612 if (cq) {
613 cqe = lpfc_sli4_cq_get(cq);
614 while (cqe) {
615 __lpfc_sli4_consume_cqe(phba, cq, cqe);
616 cq_count++;
617 cqe = lpfc_sli4_cq_get(cq);
618 }
619 /* Clear and re-arm the CQ */
620 phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
621 LPFC_QUEUE_REARM);
622 cq_count = 0;
623 }
James Smart32517fc2019-01-28 11:14:33 -0800624 __lpfc_sli4_consume_eqe(phba, eq, eqe);
James Smart24c7c0a2019-09-21 20:58:58 -0700625 eq_count++;
James Smart32517fc2019-01-28 11:14:33 -0800626 eqe = lpfc_sli4_eq_get(eq);
627 }
628
629 /* Clear and re-arm the EQ */
James Smart24c7c0a2019-09-21 20:58:58 -0700630 phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
James Smart32517fc2019-01-28 11:14:33 -0800631}
632
633static int
James Smart93a4d6f2019-11-04 16:57:05 -0800634lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
635 uint8_t rearm)
James Smart32517fc2019-01-28 11:14:33 -0800636{
637 struct lpfc_eqe *eqe;
638 int count = 0, consumed = 0;
639
640 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
641 goto rearm_and_exit;
642
643 eqe = lpfc_sli4_eq_get(eq);
644 while (eqe) {
645 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
646 __lpfc_sli4_consume_eqe(phba, eq, eqe);
647
648 consumed++;
649 if (!(++count % eq->max_proc_limit))
650 break;
651
652 if (!(count % eq->notify_interval)) {
653 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
654 LPFC_QUEUE_NOARM);
655 consumed = 0;
656 }
657
658 eqe = lpfc_sli4_eq_get(eq);
659 }
660 eq->EQ_processed += count;
661
662 /* Track the max number of EQEs processed in 1 intr */
663 if (count > eq->EQ_max_eqe)
664 eq->EQ_max_eqe = count;
665
Dick Kennedy164ba8d2020-05-01 14:43:03 -0700666 xchg(&eq->queue_claimed, 0);
James Smart32517fc2019-01-28 11:14:33 -0800667
668rearm_and_exit:
James Smart93a4d6f2019-11-04 16:57:05 -0800669 /* Always clear the EQ. */
670 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
James Smart32517fc2019-01-28 11:14:33 -0800671
672 return count;
James Smart27d6ac02018-02-22 08:18:42 -0800673}
674
675/**
James Smart4f774512009-05-22 14:52:35 -0400676 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
677 * @q: The Completion Queue to get the first valid CQE from
678 *
679 * This routine will get the first valid Completion Queue Entry from @q, update
680 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
681 * the Queue (no more work to do), or the Queue is full of CQEs that have been
682 * processed, but not popped back to the HBA then this routine will return NULL.
683 **/
684static struct lpfc_cqe *
685lpfc_sli4_cq_get(struct lpfc_queue *q)
686{
687 struct lpfc_cqe *cqe;
688
James Smart2e90f4b2011-12-13 13:22:37 -0500689 /* sanity check on queue memory */
690 if (unlikely(!q))
691 return NULL;
James Smart9afbee32019-03-12 16:30:28 -0700692 cqe = lpfc_sli4_qe(q, q->host_index);
James Smart2e90f4b2011-12-13 13:22:37 -0500693
James Smart4f774512009-05-22 14:52:35 -0400694 /* If the next CQE is not valid then we are done */
James Smart7365f6f2018-02-22 08:18:46 -0800695 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
James Smart4f774512009-05-22 14:52:35 -0400696 return NULL;
James Smart27f344e2014-05-07 17:16:46 -0400697
698 /*
699 * insert barrier for instruction interlock : data from the hardware
700 * must have the valid bit checked before it can be copied and acted
James Smart2ea259e2017-02-12 13:52:27 -0800701 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
702 * instructions allowing action on content before valid bit checked,
703 * add barrier here as well. May not be needed as "content" is a
704 * single 32-bit entity here (vs multi word structure for cq's).
James Smart27f344e2014-05-07 17:16:46 -0400705 */
706 mb();
James Smart4f774512009-05-22 14:52:35 -0400707 return cqe;
708}
709
James Smart32517fc2019-01-28 11:14:33 -0800710static void
711__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
712 struct lpfc_cqe *cqe)
713{
714 if (!phba->sli4_hba.pc_sli4_params.cqav)
715 bf_set_le32(lpfc_cqe_valid, cqe, 0);
716
717 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
718
719 /* if the index wrapped around, toggle the valid bit */
720 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
721 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
722}
723
James Smart4f774512009-05-22 14:52:35 -0400724/**
James Smart32517fc2019-01-28 11:14:33 -0800725 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
726 * @phba: the adapter with the CQ
James Smart4f774512009-05-22 14:52:35 -0400727 * @q: The Completion Queue that the host has completed processing for.
James Smart32517fc2019-01-28 11:14:33 -0800728 * @count: the number of elements that were consumed
James Smart4f774512009-05-22 14:52:35 -0400729 * @arm: Indicates whether the host wants to arms this CQ.
730 *
James Smart32517fc2019-01-28 11:14:33 -0800731 * This routine will notify the HBA, by ringing the doorbell, that the
732 * CQEs have been processed. The @arm parameter specifies whether the
733 * queue should be rearmed when ringing the doorbell.
James Smart4f774512009-05-22 14:52:35 -0400734 **/
James Smart32517fc2019-01-28 11:14:33 -0800735void
736lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
737 uint32_t count, bool arm)
James Smart4f774512009-05-22 14:52:35 -0400738{
James Smart4f774512009-05-22 14:52:35 -0400739 struct lpfc_register doorbell;
740
James Smart2e90f4b2011-12-13 13:22:37 -0500741 /* sanity check on queue memory */
James Smart32517fc2019-01-28 11:14:33 -0800742 if (unlikely(!q || (count == 0 && !arm)))
743 return;
James Smart4f774512009-05-22 14:52:35 -0400744
745 /* ring doorbell for number popped */
746 doorbell.word0 = 0;
747 if (arm)
748 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
James Smart32517fc2019-01-28 11:14:33 -0800749 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
James Smart4f774512009-05-22 14:52:35 -0400750 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
James Smart6b5151f2012-01-18 16:24:06 -0500751 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
752 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
753 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
James Smart9dd35422018-02-22 08:18:41 -0800754 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
James Smart4f774512009-05-22 14:52:35 -0400755}
756
757/**
James Smart32517fc2019-01-28 11:14:33 -0800758 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
759 * @phba: the adapter with the CQ
James Smart27d6ac02018-02-22 08:18:42 -0800760 * @q: The Completion Queue that the host has completed processing for.
James Smart32517fc2019-01-28 11:14:33 -0800761 * @count: the number of elements that were consumed
James Smart27d6ac02018-02-22 08:18:42 -0800762 * @arm: Indicates whether the host wants to arms this CQ.
763 *
James Smart32517fc2019-01-28 11:14:33 -0800764 * This routine will notify the HBA, by ringing the doorbell, that the
765 * CQEs have been processed. The @arm parameter specifies whether the
766 * queue should be rearmed when ringing the doorbell.
James Smart27d6ac02018-02-22 08:18:42 -0800767 **/
James Smart32517fc2019-01-28 11:14:33 -0800768void
769lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
770 uint32_t count, bool arm)
James Smart27d6ac02018-02-22 08:18:42 -0800771{
James Smart27d6ac02018-02-22 08:18:42 -0800772 struct lpfc_register doorbell;
773
774 /* sanity check on queue memory */
James Smart32517fc2019-01-28 11:14:33 -0800775 if (unlikely(!q || (count == 0 && !arm)))
776 return;
James Smart27d6ac02018-02-22 08:18:42 -0800777
778 /* ring doorbell for number popped */
779 doorbell.word0 = 0;
780 if (arm)
781 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
James Smart32517fc2019-01-28 11:14:33 -0800782 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
James Smart27d6ac02018-02-22 08:18:42 -0800783 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
784 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
James Smart27d6ac02018-02-22 08:18:42 -0800785}
786
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to post @hrqe on
 * @dq: The Data Receive Queue paired with @hq
 * @hrqe: The header receive queue entry to copy into @hq
 * @drqe: The data receive queue entry to copy into @dq
 *
 * This routine copies @hrqe and @drqe to the next available slot of the
 * paired receive queues, which must advance in lockstep (identical put
 * indexes).  Every @hq->notify_interval postings it rings the Receive
 * Queue Doorbell to tell the HBA new buffers were posted.
 *
 * Return: the index the entries were copied to on success, -ENOMEM if a
 * queue pointer is missing, -EBUSY if the queue is full, or -EINVAL on a
 * queue-type, index-sync, or doorbell-format error.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	/* header and data queues must stay in lockstep */
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell, batched per notify_interval */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
850
Lee Jones7af29d42020-07-21 17:41:31 +0100851/*
James Smart4f774512009-05-22 14:52:35 -0400852 * lpfc_sli4_rq_release - Updates internal hba index for RQ
James Smart4f774512009-05-22 14:52:35 -0400853 *
854 * This routine will update the HBA index of a queue to reflect consumption of
855 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
856 * consumed an entry the host calls this function to update the queue's
857 * internal pointers. This routine returns the number of entries that were
858 * consumed by the HBA.
859 **/
860static uint32_t
861lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
862{
James Smart2e90f4b2011-12-13 13:22:37 -0500863 /* sanity check on queue memory */
864 if (unlikely(!hq) || unlikely(!dq))
865 return 0;
866
James Smart4f774512009-05-22 14:52:35 -0400867 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
868 return 0;
869 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
870 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
871 return 1;
872}
873
James Smarte59058c2008-08-24 21:49:00 -0400874/**
James Smart3621a712009-04-06 18:47:14 -0400875 * lpfc_cmd_iocb - Get next command iocb entry in the ring
James Smarte59058c2008-08-24 21:49:00 -0400876 * @phba: Pointer to HBA context object.
877 * @pring: Pointer to driver SLI ring object.
878 *
879 * This function returns pointer to next command iocb entry
880 * in the command ring. The caller must hold hbalock to prevent
881 * other threads consume the next command iocb.
882 * SLI-2/SLI-3 provide different sized iocbs.
883 **/
James Smarted957682007-06-17 19:56:37 -0500884static inline IOCB_t *
885lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
886{
James Smart7e56aa22012-08-03 12:35:34 -0400887 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
888 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
James Smarted957682007-06-17 19:56:37 -0500889}
890
James Smarte59058c2008-08-24 21:49:00 -0400891/**
James Smart3621a712009-04-06 18:47:14 -0400892 * lpfc_resp_iocb - Get next response iocb entry in the ring
James Smarte59058c2008-08-24 21:49:00 -0400893 * @phba: Pointer to HBA context object.
894 * @pring: Pointer to driver SLI ring object.
895 *
896 * This function returns pointer to next response iocb entry
897 * in the response ring. The caller must hold hbalock to make sure
898 * that no other thread consume the next response iocb.
899 * SLI-2/SLI-3 provide different sized iocbs.
900 **/
James Smarted957682007-06-17 19:56:37 -0500901static inline IOCB_t *
902lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
903{
James Smart7e56aa22012-08-03 12:35:34 -0400904 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
905 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
James Smarted957682007-06-17 19:56:37 -0500906}
907
James Smarte59058c2008-08-24 21:49:00 -0400908/**
James Smart3621a712009-04-06 18:47:14 -0400909 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
James Smarte59058c2008-08-24 21:49:00 -0400910 * @phba: Pointer to HBA context object.
911 *
912 * This function is called with hbalock held. This function
913 * allocates a new driver iocb object from the iocb pool. If the
914 * allocation is successful, it returns pointer to the newly
915 * allocated iocb object else it returns NULL.
916 **/
James Smart4f2e66c2012-05-09 21:17:07 -0400917struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -0500918__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400919{
920 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
921 struct lpfc_iocbq * iocbq = NULL;
922
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +0100923 lockdep_assert_held(&phba->hbalock);
924
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400925 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
James Smart2a9bf3d2010-06-07 15:24:45 -0400926 if (iocbq)
927 phba->iocb_cnt++;
928 if (phba->iocb_cnt > phba->iocb_max)
929 phba->iocb_max = phba->iocb_cnt;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -0400930 return iocbq;
931}
932
James Smarte59058c2008-08-24 21:49:00 -0400933/**
James Smartda0436e2009-05-22 14:51:39 -0400934 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
935 * @phba: Pointer to HBA context object.
936 * @xritag: XRI value.
937 *
James Smart3bfab8a2021-04-11 18:31:23 -0700938 * This function clears the sglq pointer from the array of active
James Smartda0436e2009-05-22 14:51:39 -0400939 * sglq's. The xritag that is passed in is used to index into the
940 * array. Before the xritag can be used it needs to be adjusted
941 * by subtracting the xribase.
942 *
943 * Returns sglq ponter = success, NULL = Failure.
944 **/
James Smart895427b2017-02-12 13:52:30 -0800945struct lpfc_sglq *
James Smartda0436e2009-05-22 14:51:39 -0400946__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
947{
James Smartda0436e2009-05-22 14:51:39 -0400948 struct lpfc_sglq *sglq;
James Smart6d368e52011-05-24 11:44:12 -0400949
950 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
951 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
James Smartda0436e2009-05-22 14:51:39 -0400952 return sglq;
953}
954
955/**
956 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
957 * @phba: Pointer to HBA context object.
958 * @xritag: XRI value.
959 *
James Smart3bfab8a2021-04-11 18:31:23 -0700960 * This function returns the sglq pointer from the array of active
James Smartda0436e2009-05-22 14:51:39 -0400961 * sglq's. The xritag that is passed in is used to index into the
962 * array. Before the xritag can be used it needs to be adjusted
963 * by subtracting the xribase.
964 *
965 * Returns sglq ponter = success, NULL = Failure.
966 **/
James Smart0f65ff62010-02-26 14:14:23 -0500967struct lpfc_sglq *
James Smartda0436e2009-05-22 14:51:39 -0400968__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
969{
James Smartda0436e2009-05-22 14:51:39 -0400970 struct lpfc_sglq *sglq;
James Smart6d368e52011-05-24 11:44:12 -0400971
972 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
James Smartda0436e2009-05-22 14:51:39 -0400973 return sglq;
974}
975
976/**
James Smart1151e3e2011-02-16 12:39:35 -0500977 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
James Smart19ca7602010-11-20 23:11:55 -0500978 * @phba: Pointer to HBA context object.
979 * @xritag: xri used in this exchange.
980 * @rrq: The RRQ to be cleared.
981 *
James Smart19ca7602010-11-20 23:11:55 -0500982 **/
James Smart1151e3e2011-02-16 12:39:35 -0500983void
984lpfc_clr_rrq_active(struct lpfc_hba *phba,
985 uint16_t xritag,
986 struct lpfc_node_rrq *rrq)
James Smart19ca7602010-11-20 23:11:55 -0500987{
James Smart1151e3e2011-02-16 12:39:35 -0500988 struct lpfc_nodelist *ndlp = NULL;
James Smart19ca7602010-11-20 23:11:55 -0500989
James Smart2693f5d2021-03-01 09:18:03 -0800990 /* Lookup did to verify if did is still active on this vport */
James Smart307e3382020-11-15 11:26:30 -0800991 if (rrq->vport)
James Smart1151e3e2011-02-16 12:39:35 -0500992 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
James Smart19ca7602010-11-20 23:11:55 -0500993
James Smart1151e3e2011-02-16 12:39:35 -0500994 if (!ndlp)
995 goto out;
996
James Smartcff261f2013-12-17 20:29:47 -0500997 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
James Smart19ca7602010-11-20 23:11:55 -0500998 rrq->send_rrq = 0;
999 rrq->xritag = 0;
1000 rrq->rrq_stop_time = 0;
1001 }
James Smart1151e3e2011-02-16 12:39:35 -05001002out:
James Smart19ca7602010-11-20 23:11:55 -05001003 mempool_free(rrq, phba->rrq_pool);
1004}
1005
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * Checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	/* default next fire: one full RATOV (+1s) from now */
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	/* Under the lock, move expired rrqs to a private list; track the
	 * earliest remaining stop time for the timer reschedule.
	 */
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	/* Process expired rrqs outside the lock */
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* lpfc_send_rrq() failed (nonzero return), so clear
			 * and free here; when the send succeeds, the
			 * completion handler will clear the bit in the
			 * xribitmap instead.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
1057
1058/**
1059 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
1060 * @vport: Pointer to vport context object.
1061 * @xri: The xri used in the exchange.
1062 * @did: The targets DID for this exchange.
1063 *
1064 * returns NULL = rrq not found in the phba->active_rrq_list.
1065 * rrq = rrq for this xri and target.
1066 **/
1067struct lpfc_node_rrq *
1068lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
1069{
1070 struct lpfc_hba *phba = vport->phba;
1071 struct lpfc_node_rrq *rrq;
1072 struct lpfc_node_rrq *nextrrq;
1073 unsigned long iflags;
1074
1075 if (phba->sli_rev != LPFC_SLI_REV4)
1076 return NULL;
1077 spin_lock_irqsave(&phba->hbalock, iflags);
1078 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1079 if (rrq->vport == vport && rrq->xritag == xri &&
1080 rrq->nlp_DID == did){
1081 list_del(&rrq->list);
1082 spin_unlock_irqrestore(&phba->hbalock, iflags);
1083 return rrq;
1084 }
1085 }
1086 spin_unlock_irqrestore(&phba->hbalock, iflags);
1087 return NULL;
1088}
1089
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 * If ndlp is NULL Remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 *
 * The rrqs are first moved to a private list under hbalock, then cleared
 * and freed outside the lock (lpfc_clr_rrq_active frees each rrq).
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	/* RRQs only exist on SLI4 adapters */
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		/* Whole-vport cleanup also purges the aborted-xri lists */
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Clear and free the collected rrqs without holding hbalock */
	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
1130
1131/**
James Smart1151e3e2011-02-16 12:39:35 -05001132 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
James Smart19ca7602010-11-20 23:11:55 -05001133 * @phba: Pointer to HBA context object.
1134 * @ndlp: Targets nodelist pointer for this exchange.
Lee Jones7af29d42020-07-21 17:41:31 +01001135 * @xritag: the xri in the bitmap to test.
James Smart19ca7602010-11-20 23:11:55 -05001136 *
James Smarte2a8be52019-05-06 17:26:47 -07001137 * This function returns:
1138 * 0 = rrq not active for this xri
1139 * 1 = rrq is valid for this xri.
James Smart19ca7602010-11-20 23:11:55 -05001140 **/
James Smart1151e3e2011-02-16 12:39:35 -05001141int
1142lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
James Smart19ca7602010-11-20 23:11:55 -05001143 uint16_t xritag)
1144{
James Smart19ca7602010-11-20 23:11:55 -05001145 if (!ndlp)
1146 return 0;
James Smartcff261f2013-12-17 20:29:47 -05001147 if (!ndlp->active_rrqs_xri_bitmap)
1148 return 0;
1149 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
Colin Ian King258f84f2019-02-12 15:29:45 +00001150 return 1;
James Smart19ca7602010-11-20 23:11:55 -05001151 else
1152 return 0;
1153}
1154
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot avaiable for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 * < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	/* Bit already set means an rrq is already active for this xri */
	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	/* Drop the lock for the atomic allocation; the xri bit set above
	 * already guards against a second activation of the same xri.
	 * NOTE(review): on allocation failure the bit is left set and no
	 * rrq is queued to clear it - confirm this is the intended
	 * "always set the active bit" behavior from the header comment.
	 */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	/* cfg_enable_rrq == 1 means the caller's send_rrq request is honored */
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	/* Re-take the lock to queue the rrq and wake the worker if the
	 * list was previously empty.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
1234
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 *
 * An sglq whose xri has an outstanding RRQ for the target node is skipped;
 * the search gives up (returns NULL) once it cycles back to the first
 * candidate.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	/* Pick the ring whose lock must already be held by the caller */
	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	/* Determine the target node from the iocb's type-specific context */
	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				/* Wrapped back to the first candidate:
				 * every free sglq is blocked by an rrq.
				 */
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		/* Claim this sglq and record it in the active array */
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
1310
1311/**
James Smartf358dd02017-02-12 13:52:34 -08001312 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1313 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +01001314 * @piocbq: Pointer to the iocbq.
James Smartf358dd02017-02-12 13:52:34 -08001315 *
1316 * This function is called with the sgl_list lock held. This function
1317 * gets a new driver sglq object from the sglq list. If the
1318 * list is not empty then it is successful, it returns pointer to the newly
1319 * allocated sglq object else it returns NULL.
1320 **/
1321struct lpfc_sglq *
1322__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1323{
1324 struct list_head *lpfc_nvmet_sgl_list;
1325 struct lpfc_sglq *sglq = NULL;
1326
1327 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1328
1329 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1330
1331 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1332 if (!sglq)
1333 return NULL;
1334 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1335 sglq->state = SGL_ALLOCATED;
dea31012005-04-17 16:05:31 -05001336 return sglq;
1337}
1338
James Smarte59058c2008-08-24 21:49:00 -04001339/**
James Smart3621a712009-04-06 18:47:14 -04001340 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001341 * @phba: Pointer to HBA context object.
1342 *
1343 * This function is called with no lock held. This function
1344 * allocates a new driver iocb object from the iocb pool. If the
1345 * allocation is successful, it returns pointer to the newly
1346 * allocated iocb object else it returns NULL.
1347 **/
James Smart2e0fef82007-06-17 19:56:36 -05001348struct lpfc_iocbq *
1349lpfc_sli_get_iocbq(struct lpfc_hba *phba)
James Bottomley604a3e32005-10-29 10:28:33 -05001350{
James Smart2e0fef82007-06-17 19:56:36 -05001351 struct lpfc_iocbq * iocbq = NULL;
1352 unsigned long iflags;
1353
1354 spin_lock_irqsave(&phba->hbalock, iflags);
1355 iocbq = __lpfc_sli_get_iocbq(phba);
1356 spin_unlock_irqrestore(&phba->hbalock, iflags);
1357 return iocbq;
1358}
1359
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	/* Everything at/after the 'iocb' member gets wiped on release. */
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	/* Pull the sglq (if any) out of the active array; the clear-active
	 * helper also empties the array slot. */
	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			/* NVMET sglqs go straight back to the NVMET free
			 * list regardless of IO status. */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			/* Exchange still busy in hardware: park the sglq on
			 * the aborted list until CQ_ABORTED_XRI arrives. */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			/* Normal completion: return the sglq to the ELS
			 * free list. */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1446
James Smart2a9bf3d2010-06-07 15:24:45 -04001447
James Smart4f774512009-05-22 14:52:35 -04001448/**
James Smart3772a992009-05-22 14:50:54 -04001449 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1450 * @phba: Pointer to HBA context object.
1451 * @iocbq: Pointer to driver iocb object.
1452 *
Dick Kennedy88acb4d2020-05-01 14:43:07 -07001453 * This function is called to release the driver iocb object to the
1454 * iocb pool. The iotag in the iocb object does not change for each
1455 * use of the iocb object. This function clears all other fields of
1456 * the iocb object when it is freed. The hbalock is asserted held in
1457 * the code path calling this routine.
James Smart3772a992009-05-22 14:50:54 -04001458 **/
1459static void
1460__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1461{
1462 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1463
James Smart0e9bb8d2013-03-01 16:35:12 -05001464 /*
James Smart3772a992009-05-22 14:50:54 -04001465 * Clean all volatile data fields, preserve iotag and node struct.
1466 */
1467 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1468 iocbq->sli4_xritag = NO_XRI;
1469 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1470}
1471
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	/* Dispatch through the per-SLI-revision release handler
	 * (__lpfc_sli_release_iocbq_s3 or _s4 above). */
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
1490
James Smarte59058c2008-08-24 21:49:00 -04001491/**
James Smart3621a712009-04-06 18:47:14 -04001492 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
James Smarte59058c2008-08-24 21:49:00 -04001493 * @phba: Pointer to HBA context object.
1494 * @iocbq: Pointer to driver iocb object.
1495 *
1496 * This function is called with no lock held to release the iocb to
1497 * iocb pool.
1498 **/
James Smart2e0fef82007-06-17 19:56:36 -05001499void
1500lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1501{
1502 unsigned long iflags;
1503
1504 /*
1505 * Clean all volatile data fields, preserve iotag and node struct.
1506 */
1507 spin_lock_irqsave(&phba->hbalock, iflags);
1508 __lpfc_sli_release_iocbq(phba, iocbq);
1509 spin_unlock_irqrestore(&phba->hbalock, iflags);
1510}
1511
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	/* Drain the caller's list one iocb at a time. */
	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (piocb->wqe_cmpl) {
			/* WQE-style completion: only NVME iocbs get a
			 * cancel callback; others go back to the pool. */
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			else
				lpfc_sli_release_iocbq(phba, piocb);

		} else if (piocb->iocb_cmpl) {
			/* Legacy IOCB completion: report the caller-supplied
			 * status/word4 through the normal callback. */
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		} else {
			/* No completion handler at all; just free it. */
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
	return;
}
1549
1550/**
James Smart3621a712009-04-06 18:47:14 -04001551 * lpfc_sli_iocb_cmd_type - Get the iocb type
1552 * @iocb_cmnd: iocb command code.
James Smarte59058c2008-08-24 21:49:00 -04001553 *
1554 * This function is called by ring event handler function to get the iocb type.
1555 * This function translates the iocb command to an iocb command type used to
1556 * decide the final disposition of each completed IOCB.
1557 * The function returns
1558 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1559 * LPFC_SOL_IOCB if it is a solicited iocb completion
1560 * LPFC_ABORT_IOCB if it is an abort iocb
1561 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1562 *
1563 * The caller is not required to hold any lock.
1564 **/
dea31012005-04-17 16:05:31 -05001565static lpfc_iocb_type
1566lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1567{
1568 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1569
1570 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1571 return 0;
1572
1573 switch (iocb_cmnd) {
1574 case CMD_XMIT_SEQUENCE_CR:
1575 case CMD_XMIT_SEQUENCE_CX:
1576 case CMD_XMIT_BCAST_CN:
1577 case CMD_XMIT_BCAST_CX:
1578 case CMD_ELS_REQUEST_CR:
1579 case CMD_ELS_REQUEST_CX:
1580 case CMD_CREATE_XRI_CR:
1581 case CMD_CREATE_XRI_CX:
1582 case CMD_GET_RPI_CN:
1583 case CMD_XMIT_ELS_RSP_CX:
1584 case CMD_GET_RPI_CR:
1585 case CMD_FCP_IWRITE_CR:
1586 case CMD_FCP_IWRITE_CX:
1587 case CMD_FCP_IREAD_CR:
1588 case CMD_FCP_IREAD_CX:
1589 case CMD_FCP_ICMND_CR:
1590 case CMD_FCP_ICMND_CX:
James Smartf5603512006-12-02 13:35:43 -05001591 case CMD_FCP_TSEND_CX:
1592 case CMD_FCP_TRSP_CX:
1593 case CMD_FCP_TRECEIVE_CX:
1594 case CMD_FCP_AUTO_TRSP_CX:
dea31012005-04-17 16:05:31 -05001595 case CMD_ADAPTER_MSG:
1596 case CMD_ADAPTER_DUMP:
1597 case CMD_XMIT_SEQUENCE64_CR:
1598 case CMD_XMIT_SEQUENCE64_CX:
1599 case CMD_XMIT_BCAST64_CN:
1600 case CMD_XMIT_BCAST64_CX:
1601 case CMD_ELS_REQUEST64_CR:
1602 case CMD_ELS_REQUEST64_CX:
1603 case CMD_FCP_IWRITE64_CR:
1604 case CMD_FCP_IWRITE64_CX:
1605 case CMD_FCP_IREAD64_CR:
1606 case CMD_FCP_IREAD64_CX:
1607 case CMD_FCP_ICMND64_CR:
1608 case CMD_FCP_ICMND64_CX:
James Smartf5603512006-12-02 13:35:43 -05001609 case CMD_FCP_TSEND64_CX:
1610 case CMD_FCP_TRSP64_CX:
1611 case CMD_FCP_TRECEIVE64_CX:
dea31012005-04-17 16:05:31 -05001612 case CMD_GEN_REQUEST64_CR:
1613 case CMD_GEN_REQUEST64_CX:
1614 case CMD_XMIT_ELS_RSP64_CX:
James Smartda0436e2009-05-22 14:51:39 -04001615 case DSSCMD_IWRITE64_CR:
1616 case DSSCMD_IWRITE64_CX:
1617 case DSSCMD_IREAD64_CR:
1618 case DSSCMD_IREAD64_CX:
Dick Kennedyc93764a2020-06-30 14:49:49 -07001619 case CMD_SEND_FRAME:
dea31012005-04-17 16:05:31 -05001620 type = LPFC_SOL_IOCB;
1621 break;
1622 case CMD_ABORT_XRI_CN:
1623 case CMD_ABORT_XRI_CX:
1624 case CMD_CLOSE_XRI_CN:
1625 case CMD_CLOSE_XRI_CX:
1626 case CMD_XRI_ABORTED_CX:
1627 case CMD_ABORT_MXRI64_CN:
James Smart6669f9b2009-10-02 15:16:45 -04001628 case CMD_XMIT_BLS_RSP64_CX:
dea31012005-04-17 16:05:31 -05001629 type = LPFC_ABORT_IOCB;
1630 break;
1631 case CMD_RCV_SEQUENCE_CX:
1632 case CMD_RCV_ELS_REQ_CX:
1633 case CMD_RCV_SEQUENCE64_CX:
1634 case CMD_RCV_ELS_REQ64_CX:
James Smart57127f12007-10-27 13:37:05 -04001635 case CMD_ASYNC_STATUS:
James Smarted957682007-06-17 19:56:37 -05001636 case CMD_IOCB_RCV_SEQ64_CX:
1637 case CMD_IOCB_RCV_ELS64_CX:
1638 case CMD_IOCB_RCV_CONT64_CX:
James Smart3163f722008-02-08 18:50:25 -05001639 case CMD_IOCB_RET_XRI64_CX:
dea31012005-04-17 16:05:31 -05001640 type = LPFC_UNSOL_IOCB;
1641 break;
James Smart3163f722008-02-08 18:50:25 -05001642 case CMD_IOCB_XMIT_MSEQ64_CR:
1643 case CMD_IOCB_XMIT_MSEQ64_CX:
1644 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1645 case CMD_IOCB_RCV_ELS_LIST64_CX:
1646 case CMD_IOCB_CLOSE_EXTENDED_CN:
1647 case CMD_IOCB_ABORT_EXTENDED_CN:
1648 case CMD_IOCB_RET_HBQE64_CN:
1649 case CMD_IOCB_FCP_IBIDIR64_CR:
1650 case CMD_IOCB_FCP_IBIDIR64_CX:
1651 case CMD_IOCB_FCP_ITASKMGT64_CX:
1652 case CMD_IOCB_LOGENTRY_CN:
1653 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1654 printk("%s - Unhandled SLI-3 Command x%x\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07001655 __func__, iocb_cmnd);
James Smart3163f722008-02-08 18:50:25 -05001656 type = LPFC_UNKNOWN_IOCB;
1657 break;
dea31012005-04-17 16:05:31 -05001658 default:
1659 type = LPFC_UNKNOWN_IOCB;
1660 break;
1661 }
1662
1663 return type;
1664}
1665
James Smarte59058c2008-08-24 21:49:00 -04001666/**
James Smart3621a712009-04-06 18:47:14 -04001667 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
James Smarte59058c2008-08-24 21:49:00 -04001668 * @phba: Pointer to HBA context object.
1669 *
1670 * This function is called from SLI initialization code
1671 * to configure every ring of the HBA's SLI interface. The
1672 * caller is not required to hold any lock. This function issues
1673 * a config_ring mailbox command for each ring.
1674 * This function returns zero if successful else returns a negative
1675 * error code.
1676 **/
dea31012005-04-17 16:05:31 -05001677static int
James Smarted957682007-06-17 19:56:37 -05001678lpfc_sli_ring_map(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05001679{
1680 struct lpfc_sli *psli = &phba->sli;
James Smarted957682007-06-17 19:56:37 -05001681 LPFC_MBOXQ_t *pmb;
1682 MAILBOX_t *pmbox;
1683 int i, rc, ret = 0;
dea31012005-04-17 16:05:31 -05001684
James Smarted957682007-06-17 19:56:37 -05001685 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1686 if (!pmb)
1687 return -ENOMEM;
James Smart04c68492009-05-22 14:52:52 -04001688 pmbox = &pmb->u.mb;
James Smarted957682007-06-17 19:56:37 -05001689 phba->link_state = LPFC_INIT_MBX_CMDS;
dea31012005-04-17 16:05:31 -05001690 for (i = 0; i < psli->num_rings; i++) {
dea31012005-04-17 16:05:31 -05001691 lpfc_config_ring(phba, i, pmb);
1692 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1693 if (rc != MBX_SUCCESS) {
Dick Kennedy372c1872020-06-30 14:50:00 -07001694 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -04001695 "0446 Adapter failed to init (%d), "
dea31012005-04-17 16:05:31 -05001696 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1697 "ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001698 rc, pmbox->mbxCommand,
1699 pmbox->mbxStatus, i);
James Smart2e0fef82007-06-17 19:56:36 -05001700 phba->link_state = LPFC_HBA_ERROR;
James Smarted957682007-06-17 19:56:37 -05001701 ret = -ENXIO;
1702 break;
dea31012005-04-17 16:05:31 -05001703 }
1704 }
James Smarted957682007-06-17 19:56:37 -05001705 mempool_free(pmb, phba->mbox_mem_pool);
1706 return ret;
dea31012005-04-17 16:05:31 -05001707}
1708
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	/* SLI-4 serializes per ring; SLI-3 uses the global hbalock. */
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	/* Track the command until its completion arrives. */
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	/* For ELS commands other than aborts/closes, (re)arm the vport's
	 * ELS timeout (2 * fc_ratov seconds) unless the vport is being
	 * unloaded. */
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
1749
James Smarte59058c2008-08-24 21:49:00 -04001750/**
James Smart3621a712009-04-06 18:47:14 -04001751 * lpfc_sli_ringtx_get - Get first element of the txq
James Smarte59058c2008-08-24 21:49:00 -04001752 * @phba: Pointer to HBA context object.
1753 * @pring: Pointer to driver SLI ring object.
1754 *
1755 * This function is called with hbalock held to get next
1756 * iocb in txq of the given ring. If there is any iocb in
1757 * the txq, the function returns first iocb in the list after
1758 * removing the iocb from the list, else it returns NULL.
1759 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04001760struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05001761lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001762{
dea31012005-04-17 16:05:31 -05001763 struct lpfc_iocbq *cmd_iocb;
1764
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001765 lockdep_assert_held(&phba->hbalock);
1766
James Smart858c9f62007-06-17 19:56:39 -05001767 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
James Smart2e0fef82007-06-17 19:56:36 -05001768 return cmd_iocb;
dea31012005-04-17 16:05:31 -05001769}
1770
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	/* Advance next_cmdidx past the current slot, wrapping at the ring
	 * size. */
	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	/* Ring looks full against the cached get index; refresh the cache
	 * from the port's copy. */
	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			/* Port reported an impossible get index: declare an
			 * error attention and let the worker thread take
			 * the HBA offline. */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		/* Still full after the refresh: no slot available. */
		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
1829
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	/* Fast path: a free slot remains in the current lookup array. */
	if(++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		/* Grow the lookup array.  The lock must be dropped around
		 * the GFP_KERNEL allocation, so the state is re-checked
		 * after it is retaken. */
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly unprobable case: another thread
				 * already grew the array while the lock was
				 * dropped; retry the fast path instead. */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if(++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			/* Copy the live entries, install the bigger array,
			 * then free the old one (after unlock). */
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag  + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
1904
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	/* Trace ELS-ring submissions in the slow-ring debugfs buffer. */
	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	/* Order the iocb copy before the put-index update below. */
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1962
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there are pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/* Remember that this ring still has work queued. */
	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	/* Order the flag update before poking the chip. */
	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
1993
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		/* Doorbell is only needed when the SLI3 CRP option is off;
		 * the readl flushes the posted MMIO write. */
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
2017
James Smarte59058c2008-08-24 21:49:00 -04002018/**
James Smart3621a712009-04-06 18:47:14 -04002019 * lpfc_sli_resume_iocb - Process iocbs in the txq
James Smarte59058c2008-08-24 21:49:00 -04002020 * @phba: Pointer to HBA context object.
2021 * @pring: Pointer to driver SLI ring object.
2022 *
2023 * This function is called with hbalock held to post pending iocbs
2024 * in the txq to the firmware. This function is called when driver
2025 * detects space available in the ring.
2026 **/
dea31012005-04-17 16:05:31 -05002027static void
James Smart2e0fef82007-06-17 19:56:36 -05002028lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05002029{
2030 IOCB_t *iocb;
2031 struct lpfc_iocbq *nextiocb;
2032
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002033 lockdep_assert_held(&phba->hbalock);
2034
dea31012005-04-17 16:05:31 -05002035 /*
2036 * Check to see if:
2037 * (a) there is anything on the txq to send
2038 * (b) link is up
2039 * (c) link attention events can be processed (fcp ring only)
2040 * (d) IOCB processing is not blocked by the outstanding mbox command.
2041 */
James Smart0e9bb8d2013-03-01 16:35:12 -05002042
2043 if (lpfc_is_link_up(phba) &&
2044 (!list_empty(&pring->txq)) &&
James Smart895427b2017-02-12 13:52:30 -08002045 (pring->ringno != LPFC_FCP_RING ||
James Smart0b727fe2007-10-27 13:37:25 -04002046 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea31012005-04-17 16:05:31 -05002047
2048 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2049 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2050 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2051
2052 if (iocb)
2053 lpfc_sli_update_ring(phba, pring);
2054 else
2055 lpfc_sli_update_full_ring(phba, pring);
2056 }
2057
2058 return;
2059}
2060
James Smarte59058c2008-08-24 21:49:00 -04002061/**
James Smart3621a712009-04-06 18:47:14 -04002062 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
James Smarte59058c2008-08-24 21:49:00 -04002063 * @phba: Pointer to HBA context object.
2064 * @hbqno: HBQ number.
2065 *
2066 * This function is called with hbalock held to get the next
2067 * available slot for the given HBQ. If there is free slot
2068 * available for the HBQ it will return pointer to the next available
2069 * HBQ entry else it will return NULL.
2070 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002071static struct lpfc_hbq_entry *
James Smarted957682007-06-17 19:56:37 -05002072lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2073{
2074 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2075
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002076 lockdep_assert_held(&phba->hbalock);
2077
James Smarted957682007-06-17 19:56:37 -05002078 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2079 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2080 hbqp->next_hbqPutIdx = 0;
2081
2082 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
James Smart92d7f7b2007-06-17 19:56:38 -05002083 uint32_t raw_index = phba->hbq_get[hbqno];
James Smarted957682007-06-17 19:56:37 -05002084 uint32_t getidx = le32_to_cpu(raw_index);
2085
2086 hbqp->local_hbqGetIdx = getidx;
2087
2088 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07002089 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -04002090 "1802 HBQ %d: local_hbqGetIdx "
James Smarted957682007-06-17 19:56:37 -05002091 "%u is > than hbqp->entry_count %u\n",
James Smarte8b62012007-08-02 11:10:09 -04002092 hbqno, hbqp->local_hbqGetIdx,
James Smarted957682007-06-17 19:56:37 -05002093 hbqp->entry_count);
2094
2095 phba->link_state = LPFC_HBA_ERROR;
2096 return NULL;
2097 }
2098
2099 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2100 return NULL;
2101 }
2102
James Smart51ef4c22007-08-02 11:10:31 -04002103 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2104 hbqp->hbqPutIdx;
James Smarted957682007-06-17 19:56:37 -05002105}
2106
James Smarte59058c2008-08-24 21:49:00 -04002107/**
James Smart3621a712009-04-06 18:47:14 -04002108 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
James Smarte59058c2008-08-24 21:49:00 -04002109 * @phba: Pointer to HBA context object.
2110 *
2111 * This function is called with no lock held to free all the
2112 * hbq buffers while uninitializing the SLI interface. It also
2113 * frees the HBQ buffers returned by the firmware but not yet
2114 * processed by the upper layers.
2115 **/
James Smarted957682007-06-17 19:56:37 -05002116void
2117lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2118{
James Smart92d7f7b2007-06-17 19:56:38 -05002119 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2120 struct hbq_dmabuf *hbq_buf;
James Smart3163f722008-02-08 18:50:25 -05002121 unsigned long flags;
James Smart51ef4c22007-08-02 11:10:31 -04002122 int i, hbq_count;
James Smarted957682007-06-17 19:56:37 -05002123
James Smart51ef4c22007-08-02 11:10:31 -04002124 hbq_count = lpfc_sli_hbq_count();
James Smarted957682007-06-17 19:56:37 -05002125 /* Return all memory used by all HBQs */
James Smart3163f722008-02-08 18:50:25 -05002126 spin_lock_irqsave(&phba->hbalock, flags);
James Smart51ef4c22007-08-02 11:10:31 -04002127 for (i = 0; i < hbq_count; ++i) {
2128 list_for_each_entry_safe(dmabuf, next_dmabuf,
2129 &phba->hbqs[i].hbq_buffer_list, list) {
2130 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2131 list_del(&hbq_buf->dbuf.list);
2132 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2133 }
James Smarta8adb832007-10-27 13:37:53 -04002134 phba->hbqs[i].buffer_count = 0;
James Smarted957682007-06-17 19:56:37 -05002135 }
James Smart3163f722008-02-08 18:50:25 -05002136
2137 /* Mark the HBQs not in use */
2138 phba->hbq_in_use = 0;
2139 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smarted957682007-06-17 19:56:37 -05002140}
2141
James Smarte59058c2008-08-24 21:49:00 -04002142/**
James Smart3621a712009-04-06 18:47:14 -04002143 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04002144 * @phba: Pointer to HBA context object.
2145 * @hbqno: HBQ number.
2146 * @hbq_buf: Pointer to HBQ buffer.
2147 *
2148 * This function is called with the hbalock held to post a
2149 * hbq buffer to the firmware. If the function finds an empty
2150 * slot in the HBQ, it will post the buffer. The function will return
2151 * pointer to the hbq entry if it successfully post the buffer
2152 * else it will return NULL.
2153 **/
James Smart3772a992009-05-22 14:50:54 -04002154static int
James Smarted957682007-06-17 19:56:37 -05002155lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
James Smart92d7f7b2007-06-17 19:56:38 -05002156 struct hbq_dmabuf *hbq_buf)
James Smarted957682007-06-17 19:56:37 -05002157{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002158 lockdep_assert_held(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04002159 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2160}
2161
2162/**
2163 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2164 * @phba: Pointer to HBA context object.
2165 * @hbqno: HBQ number.
2166 * @hbq_buf: Pointer to HBQ buffer.
2167 *
2168 * This function is called with the hbalock held to post a hbq buffer to the
2169 * firmware. If the function finds an empty slot in the HBQ, it will post the
2170 * buffer and place it on the hbq_buffer_list. The function will return zero if
2171 * it successfully post the buffer else it will return an error.
2172 **/
2173static int
2174lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2175 struct hbq_dmabuf *hbq_buf)
2176{
James Smarted957682007-06-17 19:56:37 -05002177 struct lpfc_hbq_entry *hbqe;
James Smart92d7f7b2007-06-17 19:56:38 -05002178 dma_addr_t physaddr = hbq_buf->dbuf.phys;
James Smarted957682007-06-17 19:56:37 -05002179
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002180 lockdep_assert_held(&phba->hbalock);
James Smarted957682007-06-17 19:56:37 -05002181 /* Get next HBQ entry slot to use */
2182 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2183 if (hbqe) {
2184 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2185
James Smart92d7f7b2007-06-17 19:56:38 -05002186 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2187 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
James Smart895427b2017-02-12 13:52:30 -08002188 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
James Smarted957682007-06-17 19:56:37 -05002189 hbqe->bde.tus.f.bdeFlags = 0;
James Smart92d7f7b2007-06-17 19:56:38 -05002190 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2191 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2192 /* Sync SLIM */
James Smarted957682007-06-17 19:56:37 -05002193 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2194 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
James Smart92d7f7b2007-06-17 19:56:38 -05002195 /* flush */
James Smarted957682007-06-17 19:56:37 -05002196 readl(phba->hbq_put + hbqno);
James Smart51ef4c22007-08-02 11:10:31 -04002197 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
James Smart3772a992009-05-22 14:50:54 -04002198 return 0;
2199 } else
2200 return -ENOMEM;
James Smarted957682007-06-17 19:56:37 -05002201}
2202
James Smart4f774512009-05-22 14:52:35 -04002203/**
2204 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2205 * @phba: Pointer to HBA context object.
2206 * @hbqno: HBQ number.
2207 * @hbq_buf: Pointer to HBQ buffer.
2208 *
2209 * This function is called with the hbalock held to post an RQE to the SLI4
2210 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2211 * the hbq_buffer_list and return zero, otherwise it will return an error.
2212 **/
2213static int
2214lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2215 struct hbq_dmabuf *hbq_buf)
2216{
2217 int rc;
2218 struct lpfc_rqe hrqe;
2219 struct lpfc_rqe drqe;
James Smart895427b2017-02-12 13:52:30 -08002220 struct lpfc_queue *hrq;
2221 struct lpfc_queue *drq;
2222
2223 if (hbqno != LPFC_ELS_HBQ)
2224 return 1;
2225 hrq = phba->sli4_hba.hdr_rq;
2226 drq = phba->sli4_hba.dat_rq;
James Smart4f774512009-05-22 14:52:35 -04002227
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002228 lockdep_assert_held(&phba->hbalock);
James Smart4f774512009-05-22 14:52:35 -04002229 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2230 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2231 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2232 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
James Smart895427b2017-02-12 13:52:30 -08002233 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
James Smart4f774512009-05-22 14:52:35 -04002234 if (rc < 0)
2235 return rc;
James Smart895427b2017-02-12 13:52:30 -08002236 hbq_buf->tag = (rc | (hbqno << 16));
James Smart4f774512009-05-22 14:52:35 -04002237 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2238 return 0;
2239}
2240
James Smarte59058c2008-08-24 21:49:00 -04002241/* HBQ for ELS and CT traffic. */
James Smart92d7f7b2007-06-17 19:56:38 -05002242static struct lpfc_hbq_init lpfc_els_hbq = {
2243 .rn = 1,
James Smartdef9c7a2009-12-21 17:02:28 -05002244 .entry_count = 256,
James Smart92d7f7b2007-06-17 19:56:38 -05002245 .mask_count = 0,
2246 .profile = 0,
James Smart51ef4c22007-08-02 11:10:31 -04002247 .ring_mask = (1 << LPFC_ELS_RING),
James Smart92d7f7b2007-06-17 19:56:38 -05002248 .buffer_count = 0,
James Smarta257bf92009-04-06 18:48:10 -04002249 .init_count = 40,
2250 .add_count = 40,
James Smart92d7f7b2007-06-17 19:56:38 -05002251};
James Smarted957682007-06-17 19:56:37 -05002252
James Smarte59058c2008-08-24 21:49:00 -04002253/* Array of HBQs */
James Smart78b2d852007-08-02 11:10:21 -04002254struct lpfc_hbq_init *lpfc_hbq_defs[] = {
James Smart92d7f7b2007-06-17 19:56:38 -05002255 &lpfc_els_hbq,
2256};
2257
James Smarte59058c2008-08-24 21:49:00 -04002258/**
James Smart3621a712009-04-06 18:47:14 -04002259 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
James Smarte59058c2008-08-24 21:49:00 -04002260 * @phba: Pointer to HBA context object.
2261 * @hbqno: HBQ number.
2262 * @count: Number of HBQ buffers to be posted.
2263 *
James Smartd7c255b2008-08-24 21:50:00 -04002264 * This function is called with no lock held to post more hbq buffers to the
2265 * given HBQ. The function returns the number of HBQ buffers successfully
2266 * posted.
James Smarte59058c2008-08-24 21:49:00 -04002267 **/
James Smart311464e2007-08-02 11:10:37 -04002268static int
James Smart92d7f7b2007-06-17 19:56:38 -05002269lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2270{
James Smartd7c255b2008-08-24 21:50:00 -04002271 uint32_t i, posted = 0;
James Smart3163f722008-02-08 18:50:25 -05002272 unsigned long flags;
James Smart92d7f7b2007-06-17 19:56:38 -05002273 struct hbq_dmabuf *hbq_buffer;
James Smartd7c255b2008-08-24 21:50:00 -04002274 LIST_HEAD(hbq_buf_list);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07002275 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
James Smart51ef4c22007-08-02 11:10:31 -04002276 return 0;
James Smart51ef4c22007-08-02 11:10:31 -04002277
James Smartd7c255b2008-08-24 21:50:00 -04002278 if ((phba->hbqs[hbqno].buffer_count + count) >
2279 lpfc_hbq_defs[hbqno]->entry_count)
2280 count = lpfc_hbq_defs[hbqno]->entry_count -
2281 phba->hbqs[hbqno].buffer_count;
2282 if (!count)
2283 return 0;
2284 /* Allocate HBQ entries */
2285 for (i = 0; i < count; i++) {
2286 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2287 if (!hbq_buffer)
2288 break;
2289 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2290 }
James Smart3163f722008-02-08 18:50:25 -05002291 /* Check whether HBQ is still in use */
2292 spin_lock_irqsave(&phba->hbalock, flags);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07002293 if (!phba->hbq_in_use)
James Smartd7c255b2008-08-24 21:50:00 -04002294 goto err;
2295 while (!list_empty(&hbq_buf_list)) {
2296 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2297 dbuf.list);
2298 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2299 (hbqno << 16));
James Smart3772a992009-05-22 14:50:54 -04002300 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
James Smarta8adb832007-10-27 13:37:53 -04002301 phba->hbqs[hbqno].buffer_count++;
James Smartd7c255b2008-08-24 21:50:00 -04002302 posted++;
2303 } else
James Smart51ef4c22007-08-02 11:10:31 -04002304 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smart92d7f7b2007-06-17 19:56:38 -05002305 }
James Smart3163f722008-02-08 18:50:25 -05002306 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smartd7c255b2008-08-24 21:50:00 -04002307 return posted;
2308err:
2309 spin_unlock_irqrestore(&phba->hbalock, flags);
2310 while (!list_empty(&hbq_buf_list)) {
2311 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2312 dbuf.list);
2313 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2314 }
James Smart92d7f7b2007-06-17 19:56:38 -05002315 return 0;
James Smarted957682007-06-17 19:56:37 -05002316}
2317
James Smarte59058c2008-08-24 21:49:00 -04002318/**
James Smart3621a712009-04-06 18:47:14 -04002319 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
James Smarte59058c2008-08-24 21:49:00 -04002320 * @phba: Pointer to HBA context object.
2321 * @qno: HBQ number.
2322 *
2323 * This function posts more buffers to the HBQ. This function
James Smartd7c255b2008-08-24 21:50:00 -04002324 * is called with no lock held. The function returns the number of HBQ entries
2325 * successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04002326 **/
James Smarted957682007-06-17 19:56:37 -05002327int
James Smart92d7f7b2007-06-17 19:56:38 -05002328lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05002329{
James Smartdef9c7a2009-12-21 17:02:28 -05002330 if (phba->sli_rev == LPFC_SLI_REV4)
2331 return 0;
2332 else
2333 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2334 lpfc_hbq_defs[qno]->add_count);
James Smarted957682007-06-17 19:56:37 -05002335}
2336
James Smarte59058c2008-08-24 21:49:00 -04002337/**
James Smart3621a712009-04-06 18:47:14 -04002338 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
James Smarte59058c2008-08-24 21:49:00 -04002339 * @phba: Pointer to HBA context object.
2340 * @qno: HBQ queue number.
2341 *
2342 * This function is called from SLI initialization code path with
2343 * no lock held to post initial HBQ buffers to firmware. The
James Smartd7c255b2008-08-24 21:50:00 -04002344 * function returns the number of HBQ entries successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04002345 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002346static int
James Smart92d7f7b2007-06-17 19:56:38 -05002347lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05002348{
James Smartdef9c7a2009-12-21 17:02:28 -05002349 if (phba->sli_rev == LPFC_SLI_REV4)
2350 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
James Smart73d91e52011-10-10 21:32:10 -04002351 lpfc_hbq_defs[qno]->entry_count);
James Smartdef9c7a2009-12-21 17:02:28 -05002352 else
2353 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2354 lpfc_hbq_defs[qno]->init_count);
James Smarted957682007-06-17 19:56:37 -05002355}
2356
Lee Jones7af29d42020-07-21 17:41:31 +01002357/*
James Smart3772a992009-05-22 14:50:54 -04002358 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
James Smart3772a992009-05-22 14:50:54 -04002359 *
2360 * This function removes the first hbq buffer on an hbq list and returns a
2361 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2362 **/
2363static struct hbq_dmabuf *
2364lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2365{
2366 struct lpfc_dmabuf *d_buf;
2367
2368 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2369 if (!d_buf)
2370 return NULL;
2371 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2372}
2373
2374/**
James Smart2d7dbc42017-02-12 13:52:35 -08002375 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2376 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +01002377 * @hrq: HBQ number.
James Smart2d7dbc42017-02-12 13:52:35 -08002378 *
2379 * This function removes the first RQ buffer on an RQ buffer list and returns a
2380 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2381 **/
2382static struct rqb_dmabuf *
2383lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2384{
2385 struct lpfc_dmabuf *h_buf;
2386 struct lpfc_rqb *rqbp;
2387
2388 rqbp = hrq->rqbp;
2389 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2390 struct lpfc_dmabuf, list);
2391 if (!h_buf)
2392 return NULL;
2393 rqbp->buffer_count--;
2394 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2395}
2396
2397/**
James Smart3621a712009-04-06 18:47:14 -04002398 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
James Smarte59058c2008-08-24 21:49:00 -04002399 * @phba: Pointer to HBA context object.
2400 * @tag: Tag of the hbq buffer.
2401 *
Sebastian Herbszt71892412016-04-17 13:27:27 +02002402 * This function searches for the hbq buffer associated with the given tag in
2403 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2404 * otherwise it returns NULL.
James Smarte59058c2008-08-24 21:49:00 -04002405 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002406static struct hbq_dmabuf *
James Smarted957682007-06-17 19:56:37 -05002407lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2408{
James Smart92d7f7b2007-06-17 19:56:38 -05002409 struct lpfc_dmabuf *d_buf;
2410 struct hbq_dmabuf *hbq_buf;
James Smart51ef4c22007-08-02 11:10:31 -04002411 uint32_t hbqno;
James Smarted957682007-06-17 19:56:37 -05002412
James Smart51ef4c22007-08-02 11:10:31 -04002413 hbqno = tag >> 16;
Jesper Juhla0a74e452007-08-09 20:47:15 +02002414 if (hbqno >= LPFC_MAX_HBQS)
James Smart51ef4c22007-08-02 11:10:31 -04002415 return NULL;
2416
James Smart3772a992009-05-22 14:50:54 -04002417 spin_lock_irq(&phba->hbalock);
James Smart51ef4c22007-08-02 11:10:31 -04002418 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
James Smart92d7f7b2007-06-17 19:56:38 -05002419 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
James Smart51ef4c22007-08-02 11:10:31 -04002420 if (hbq_buf->tag == tag) {
James Smart3772a992009-05-22 14:50:54 -04002421 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05002422 return hbq_buf;
James Smarted957682007-06-17 19:56:37 -05002423 }
2424 }
James Smart3772a992009-05-22 14:50:54 -04002425 spin_unlock_irq(&phba->hbalock);
Dick Kennedy372c1872020-06-30 14:50:00 -07002426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -04002427 "1803 Bad hbq tag. Data: x%x x%x\n",
James Smarta8adb832007-10-27 13:37:53 -04002428 tag, phba->hbqs[tag >> 16].buffer_count);
James Smart92d7f7b2007-06-17 19:56:38 -05002429 return NULL;
James Smarted957682007-06-17 19:56:37 -05002430}
2431
James Smarte59058c2008-08-24 21:49:00 -04002432/**
James Smart3621a712009-04-06 18:47:14 -04002433 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04002434 * @phba: Pointer to HBA context object.
2435 * @hbq_buffer: Pointer to HBQ buffer.
2436 *
2437 * This function is called with hbalock. This function gives back
2438 * the hbq buffer to firmware. If the HBQ does not have space to
2439 * post the buffer, it will free the buffer.
2440 **/
James Smarted957682007-06-17 19:56:37 -05002441void
James Smart51ef4c22007-08-02 11:10:31 -04002442lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
James Smarted957682007-06-17 19:56:37 -05002443{
2444 uint32_t hbqno;
2445
James Smart51ef4c22007-08-02 11:10:31 -04002446 if (hbq_buffer) {
2447 hbqno = hbq_buffer->tag >> 16;
James Smart3772a992009-05-22 14:50:54 -04002448 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
James Smart51ef4c22007-08-02 11:10:31 -04002449 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smarted957682007-06-17 19:56:37 -05002450 }
2451}
2452
James Smarte59058c2008-08-24 21:49:00 -04002453/**
James Smart3621a712009-04-06 18:47:14 -04002454 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
James Smarte59058c2008-08-24 21:49:00 -04002455 * @mbxCommand: mailbox command code.
2456 *
2457 * This function is called by the mailbox event handler function to verify
2458 * that the completed mailbox command is a legitimate mailbox command. If the
2459 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2460 * and the mailbox event handler will take the HBA offline.
2461 **/
dea31012005-04-17 16:05:31 -05002462static int
2463lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2464{
2465 uint8_t ret;
2466
2467 switch (mbxCommand) {
2468 case MBX_LOAD_SM:
2469 case MBX_READ_NV:
2470 case MBX_WRITE_NV:
James Smarta8adb832007-10-27 13:37:53 -04002471 case MBX_WRITE_VPARMS:
dea31012005-04-17 16:05:31 -05002472 case MBX_RUN_BIU_DIAG:
2473 case MBX_INIT_LINK:
2474 case MBX_DOWN_LINK:
2475 case MBX_CONFIG_LINK:
2476 case MBX_CONFIG_RING:
2477 case MBX_RESET_RING:
2478 case MBX_READ_CONFIG:
2479 case MBX_READ_RCONFIG:
2480 case MBX_READ_SPARM:
2481 case MBX_READ_STATUS:
2482 case MBX_READ_RPI:
2483 case MBX_READ_XRI:
2484 case MBX_READ_REV:
2485 case MBX_READ_LNK_STAT:
2486 case MBX_REG_LOGIN:
2487 case MBX_UNREG_LOGIN:
dea31012005-04-17 16:05:31 -05002488 case MBX_CLEAR_LA:
2489 case MBX_DUMP_MEMORY:
2490 case MBX_DUMP_CONTEXT:
2491 case MBX_RUN_DIAGS:
2492 case MBX_RESTART:
2493 case MBX_UPDATE_CFG:
2494 case MBX_DOWN_LOAD:
2495 case MBX_DEL_LD_ENTRY:
2496 case MBX_RUN_PROGRAM:
2497 case MBX_SET_MASK:
James Smart09372822008-01-11 01:52:54 -05002498 case MBX_SET_VARIABLE:
dea31012005-04-17 16:05:31 -05002499 case MBX_UNREG_D_ID:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002500 case MBX_KILL_BOARD:
dea31012005-04-17 16:05:31 -05002501 case MBX_CONFIG_FARP:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002502 case MBX_BEACON:
dea31012005-04-17 16:05:31 -05002503 case MBX_LOAD_AREA:
2504 case MBX_RUN_BIU_DIAG64:
2505 case MBX_CONFIG_PORT:
2506 case MBX_READ_SPARM64:
2507 case MBX_READ_RPI64:
2508 case MBX_REG_LOGIN64:
James Smart76a95d72010-11-20 23:11:48 -05002509 case MBX_READ_TOPOLOGY:
James Smart09372822008-01-11 01:52:54 -05002510 case MBX_WRITE_WWN:
dea31012005-04-17 16:05:31 -05002511 case MBX_SET_DEBUG:
2512 case MBX_LOAD_EXP_ROM:
James Smart57127f12007-10-27 13:37:05 -04002513 case MBX_ASYNCEVT_ENABLE:
James Smart92d7f7b2007-06-17 19:56:38 -05002514 case MBX_REG_VPI:
2515 case MBX_UNREG_VPI:
James Smart858c9f62007-06-17 19:56:39 -05002516 case MBX_HEARTBEAT:
James Smart84774a42008-08-24 21:50:06 -04002517 case MBX_PORT_CAPABILITIES:
2518 case MBX_PORT_IOV_CONTROL:
James Smart04c68492009-05-22 14:52:52 -04002519 case MBX_SLI4_CONFIG:
2520 case MBX_SLI4_REQ_FTRS:
2521 case MBX_REG_FCFI:
2522 case MBX_UNREG_FCFI:
2523 case MBX_REG_VFI:
2524 case MBX_UNREG_VFI:
2525 case MBX_INIT_VPI:
2526 case MBX_INIT_VFI:
2527 case MBX_RESUME_RPI:
James Smartc7495932010-04-06 15:05:28 -04002528 case MBX_READ_EVENT_LOG_STATUS:
2529 case MBX_READ_EVENT_LOG:
James Smartdcf2a4e2010-09-29 11:18:53 -04002530 case MBX_SECURITY_MGMT:
2531 case MBX_AUTH_PORT:
James Smart940eb682012-08-03 12:37:08 -04002532 case MBX_ACCESS_VDATA:
dea31012005-04-17 16:05:31 -05002533 ret = mbxCommand;
2534 break;
2535 default:
2536 ret = MBX_SHUTDOWN;
2537 break;
2538 }
James Smart2e0fef82007-06-17 19:56:36 -05002539 return ret;
dea31012005-04-17 16:05:31 -05002540}
James Smarte59058c2008-08-24 21:49:00 -04002541
2542/**
James Smart3621a712009-04-06 18:47:14 -04002543 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002544 * @phba: Pointer to HBA context object.
2545 * @pmboxq: Pointer to mailbox command.
2546 *
2547 * This is completion handler function for mailbox commands issued from
2548 * lpfc_sli_issue_mbox_wait function. This function is called by the
2549 * mailbox event handler function with no lock held. This function
2550 * will wake up thread waiting on the wait queue pointed by context1
2551 * of the mailbox.
2552 **/
James Smart04c68492009-05-22 14:52:52 -04002553void
James Smart2e0fef82007-06-17 19:56:36 -05002554lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea31012005-04-17 16:05:31 -05002555{
James Smart858c9f62007-06-17 19:56:39 -05002556 unsigned long drvr_flag;
James Smarte29d74f2018-03-05 12:04:07 -08002557 struct completion *pmbox_done;
dea31012005-04-17 16:05:31 -05002558
2559 /*
James Smarte29d74f2018-03-05 12:04:07 -08002560 * If pmbox_done is empty, the driver thread gave up waiting and
dea31012005-04-17 16:05:31 -05002561 * continued running.
2562 */
James Smart7054a602007-04-25 09:52:34 -04002563 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
James Smart858c9f62007-06-17 19:56:39 -05002564 spin_lock_irqsave(&phba->hbalock, drvr_flag);
James Smarte29d74f2018-03-05 12:04:07 -08002565 pmbox_done = (struct completion *)pmboxq->context3;
2566 if (pmbox_done)
2567 complete(pmbox_done);
James Smart858c9f62007-06-17 19:56:39 -05002568 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05002569 return;
2570}
2571
James Smartb95b2112019-08-14 16:56:47 -07002572static void
2573__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2574{
2575 unsigned long iflags;
2576
2577 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2578 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
James Smartc6adba12020-11-15 11:26:34 -08002579 spin_lock_irqsave(&ndlp->lock, iflags);
James Smartb95b2112019-08-14 16:56:47 -07002580 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2581 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
James Smartc6adba12020-11-15 11:26:34 -08002582 spin_unlock_irqrestore(&ndlp->lock, iflags);
James Smartb95b2112019-08-14 16:56:47 -07002583 }
2584 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2585}
James Smarte59058c2008-08-24 21:49:00 -04002586
2587/**
James Smart3621a712009-04-06 18:47:14 -04002588 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002589 * @phba: Pointer to HBA context object.
2590 * @pmb: Pointer to mailbox object.
2591 *
2592 * This function is the default mailbox completion handler. It
2593 * frees the memory resources associated with the completed mailbox
2594 * command. If the completed command is a REG_LOGIN mailbox command,
2595 * this function will issue a UREG_LOGIN to re-claim the RPI.
2596 **/
dea31012005-04-17 16:05:31 -05002597void
James Smart2e0fef82007-06-17 19:56:36 -05002598lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea31012005-04-17 16:05:31 -05002599{
James Smartd439d282010-09-29 11:18:45 -04002600 struct lpfc_vport *vport = pmb->vport;
dea31012005-04-17 16:05:31 -05002601 struct lpfc_dmabuf *mp;
James Smartd439d282010-09-29 11:18:45 -04002602 struct lpfc_nodelist *ndlp;
James Smart5af5eee2010-10-22 11:06:38 -04002603 struct Scsi_Host *shost;
James Smart04c68492009-05-22 14:52:52 -04002604 uint16_t rpi, vpi;
James Smart7054a602007-04-25 09:52:34 -04002605 int rc;
2606
James Smart3e1f0712018-11-29 16:09:29 -08002607 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
James Smart7054a602007-04-25 09:52:34 -04002608
dea31012005-04-17 16:05:31 -05002609 if (mp) {
2610 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2611 kfree(mp);
2612 }
James Smart7054a602007-04-25 09:52:34 -04002613
2614 /*
2615 * If a REG_LOGIN succeeded after node is destroyed or node
2616 * is in re-discovery driver need to cleanup the RPI.
2617 */
James Smart2e0fef82007-06-17 19:56:36 -05002618 if (!(phba->pport->load_flag & FC_UNLOADING) &&
James Smart04c68492009-05-22 14:52:52 -04002619 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2620 !pmb->u.mb.mbxStatus) {
2621 rpi = pmb->u.mb.un.varWords[0];
James Smart6d368e52011-05-24 11:44:12 -04002622 vpi = pmb->u.mb.un.varRegLogin.vpi;
James Smart38503942020-03-22 11:12:53 -07002623 if (phba->sli_rev == LPFC_SLI_REV4)
2624 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
James Smart04c68492009-05-22 14:52:52 -04002625 lpfc_unreg_login(phba, vpi, rpi, pmb);
James Smartde96e9c2016-03-31 14:12:27 -07002626 pmb->vport = vport;
James Smart92d7f7b2007-06-17 19:56:38 -05002627 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart7054a602007-04-25 09:52:34 -04002628 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2629 if (rc != MBX_NOT_FINISHED)
2630 return;
2631 }
2632
James Smart695a8142010-01-26 23:08:03 -05002633 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2634 !(phba->pport->load_flag & FC_UNLOADING) &&
2635 !pmb->u.mb.mbxStatus) {
James Smart5af5eee2010-10-22 11:06:38 -04002636 shost = lpfc_shost_from_vport(vport);
2637 spin_lock_irq(shost->host_lock);
2638 vport->vpi_state |= LPFC_VPI_REGISTERED;
2639 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2640 spin_unlock_irq(shost->host_lock);
James Smart695a8142010-01-26 23:08:03 -05002641 }
2642
James Smartd439d282010-09-29 11:18:45 -04002643 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
James Smart3e1f0712018-11-29 16:09:29 -08002644 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
James Smartd439d282010-09-29 11:18:45 -04002645 lpfc_nlp_put(ndlp);
James Smartdea16bd2018-11-29 16:09:30 -08002646 pmb->ctx_buf = NULL;
2647 pmb->ctx_ndlp = NULL;
2648 }
2649
2650 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2651 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2652
2653 /* Check to see if there are any deferred events to process */
2654 if (ndlp) {
2655 lpfc_printf_vlog(
2656 vport,
2657 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2658 "1438 UNREG cmpl deferred mbox x%x "
James Smartf1156122021-04-11 18:31:24 -07002659 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
James Smartdea16bd2018-11-29 16:09:30 -08002660 ndlp->nlp_rpi, ndlp->nlp_DID,
James Smarte9b11082020-11-15 11:26:33 -08002661 ndlp->nlp_flag, ndlp->nlp_defer_did,
2662 ndlp, vport->load_flag, kref_read(&ndlp->kref));
James Smartdea16bd2018-11-29 16:09:30 -08002663
2664 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2665 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
James Smart00292e02018-12-13 15:17:55 -08002666 ndlp->nlp_flag &= ~NLP_UNREG_INP;
James Smartdea16bd2018-11-29 16:09:30 -08002667 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2668 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
James Smart00292e02018-12-13 15:17:55 -08002669 } else {
James Smartb95b2112019-08-14 16:56:47 -07002670 __lpfc_sli_rpi_release(vport, ndlp);
James Smartdea16bd2018-11-29 16:09:30 -08002671 }
James Smarta70e63e2020-11-15 11:26:38 -08002672
2673 /* The unreg_login mailbox is complete and had a
2674 * reference that has to be released. The PLOGI
2675 * got its own ref.
2676 */
2677 lpfc_nlp_put(ndlp);
James Smart9b164062019-03-12 16:30:06 -07002678 pmb->ctx_ndlp = NULL;
James Smartdea16bd2018-11-29 16:09:30 -08002679 }
James Smartd439d282010-09-29 11:18:45 -04002680 }
2681
James Smart1037e4b2021-05-14 12:55:52 -07002682 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2683 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2684 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2685 lpfc_nlp_put(ndlp);
2686 }
2687
James Smartdcf2a4e2010-09-29 11:18:53 -04002688 /* Check security permission status on INIT_LINK mailbox command */
2689 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2690 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
Dick Kennedy372c1872020-06-30 14:50:00 -07002691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartdcf2a4e2010-09-29 11:18:53 -04002692 "2860 SLI authentication is required "
2693 "for INIT_LINK but has not done yet\n");
2694
James Smart04c68492009-05-22 14:52:52 -04002695 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2696 lpfc_sli4_mbox_cmd_free(phba, pmb);
2697 else
2698 mempool_free(pmb, phba->mbox_mem_pool);
dea31012005-04-17 16:05:31 -05002699}
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts that
 * reference back.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		/* Node cleanup below applies only to SLI4 interface
		 * type 2 and later adapters.
		 */
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					 "0010 UNREG_LOGIN vpi:%x "
					 "rpi:%x DID:%x defer x%x flg x%x "
					 "x%px\n",
					 vport->vpi, ndlp->nlp_rpi,
					 ndlp->nlp_DID, ndlp->nlp_defer_did,
					 ndlp->nlp_flag,
					 ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				    NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					/* Re-drive the deferred PLOGI now
					 * that the unreg has completed.
					 */
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
				/* Put back the reference that was held for
				 * the duration of the unreg mailbox command
				 * (see function header).
				 */
				lpfc_nlp_put(ndlp);
			}
		}
	}

	/* Mailbox object is always returned to the pool. */
	mempool_free(pmb, phba->mbox_mem_pool);
}
dea31012005-04-17 16:05:31 -05002765
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to upper layers. The
 * interrupt service routine processes mailbox completion interrupt and
 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
 * will return the completed mailbox commands in the mboxq_cmpl queue to
 * the upper layers. This function returns the mailbox commands to the
 * upper layer by calling the completion handler function of each mailbox.
 *
 * Return: always 0.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq; splice under
	 * hbalock so the ISR can keep appending to mboxq_cmpl safely.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		/* Trace all completions except heartbeats. */
		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			/* Escalate to HBA error attention handling. */
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport ? pmb->vport->port_state :
					LPFC_VPORT_UNKNOWN);
				/* Clear status, hand ownership back to the
				 * host and reissue the command.
				 */
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		/* Hand the completion to the command's registered handler. */
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba,pmb);
	} while (1);
	return 0;
}
James Smart92d7f7b2007-06-17 19:56:38 -05002899
James Smarte59058c2008-08-24 21:49:00 -04002900/**
James Smart3621a712009-04-06 18:47:14 -04002901 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
James Smarte59058c2008-08-24 21:49:00 -04002902 * @phba: Pointer to HBA context object.
2903 * @pring: Pointer to driver SLI ring object.
2904 * @tag: buffer tag.
2905 *
2906 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2907 * is set in the tag the buffer is posted for a particular exchange,
2908 * the function will return the buffer without replacing the buffer.
2909 * If the buffer is for unsolicited ELS or CT traffic, this function
2910 * returns the buffer and also posts another buffer to the firmware.
2911 **/
James Smart76bb24e2007-10-27 13:38:00 -04002912static struct lpfc_dmabuf *
2913lpfc_sli_get_buff(struct lpfc_hba *phba,
James Smart9f1e1b52008-12-04 22:39:40 -05002914 struct lpfc_sli_ring *pring,
2915 uint32_t tag)
James Smart76bb24e2007-10-27 13:38:00 -04002916{
James Smart9f1e1b52008-12-04 22:39:40 -05002917 struct hbq_dmabuf *hbq_entry;
2918
James Smart76bb24e2007-10-27 13:38:00 -04002919 if (tag & QUE_BUFTAG_BIT)
2920 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
James Smart9f1e1b52008-12-04 22:39:40 -05002921 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2922 if (!hbq_entry)
2923 return NULL;
2924 return &hbq_entry->dbuf;
James Smart76bb24e2007-10-27 13:38:00 -04002925}
James Smart57127f12007-10-27 13:37:05 -04002926
/**
 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
 *                              containing a NVME LS request.
 * @phba: pointer to lpfc hba data structure.
 * @piocb: pointer to the iocbq struct representing the sequence starting
 *         frame.
 *
 * This routine initially validates the NVME LS, validates there is a login
 * with the port that sent the LS, and then calls the appropriate nvme host
 * or target LS request handler. On any failure the receive buffer is
 * recycled, a new exchange is aborted, and the context is freed.
 **/
static void
lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_async_xchg_ctx *axchg = NULL;
	char *failwhy = NULL;
	uint32_t oxid, sid, did, fctl, size;
	int ret = 1;	/* non-zero means "LS not handled" at out_fail */

	d_buf = piocb->context2;

	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
	fc_hdr = nvmebuf->hbuf.virt;
	/* Pull exchange id, addresses and frame control from the header. */
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	did = sli4_did_from_fc_hdr(fc_hdr);
	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
		fc_hdr->fh_f_ctl[1] << 8 |
		fc_hdr->fh_f_ctl[2]);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);

	lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	/* Validation chain: first failure wins and names the reason. */
	if (phba->pport->load_flag & FC_UNLOADING) {
		failwhy = "Driver Unloading";
	} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
		failwhy = "NVME FC4 Disabled";
	} else if (!phba->nvmet_support && !phba->pport->localport) {
		failwhy = "No Localport";
	} else if (phba->nvmet_support && !phba->targetport) {
		failwhy = "No Targetport";
	} else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
		failwhy = "Bad NVME LS R_CTL";
	} else if (unlikely((fctl & 0x00FF0000) !=
			(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
		failwhy = "Bad NVME LS F_CTL";
	} else {
		/* GFP_ATOMIC: may run in a context that cannot sleep --
		 * TODO confirm against caller context.
		 */
		axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
		if (!axchg)
			failwhy = "No CTX memory";
	}

	if (unlikely(failwhy)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
				sid, oxid, failwhy);
		goto out_fail;
	}

	/* validate the source of the LS is logged in */
	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6216 NVME Unsol rcv: No ndlp: "
				"NPort_ID x%x oxid x%x\n",
				sid, oxid);
		goto out_fail;
	}

	/* Populate the async exchange context for the LS handler. */
	axchg->phba = phba;
	axchg->ndlp = ndlp;
	axchg->size = size;
	axchg->oxid = oxid;
	axchg->sid = sid;
	axchg->wqeq = NULL;
	axchg->state = LPFC_NVME_STE_LS_RCV;
	axchg->entry_cnt = 1;
	axchg->rqb_buffer = (void *)nvmebuf;
	axchg->hdwq = &phba->sli4_hba.hdwq[0];
	axchg->payload = nvmebuf->dbuf.virt;
	INIT_LIST_HEAD(&axchg->list);

	if (phba->nvmet_support) {
		ret = lpfc_nvmet_handle_lsreq(phba, axchg);
		spin_lock_irq(&ndlp->lock);
		if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
			ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
			spin_unlock_irq(&ndlp->lock);

			/* This reference is a single occurrence to hold the
			 * node valid until the nvmet transport calls
			 * host_release.
			 */
			if (!lpfc_nlp_get(ndlp))
				goto out_fail;

			lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
					"6206 NVMET unsol ls_req ndlp x%px "
					"DID x%x xflags x%x refcnt %d\n",
					ndlp, ndlp->nlp_DID,
					ndlp->fc4_xpt_flags,
					kref_read(&ndlp->kref));
		} else {
			spin_unlock_irq(&ndlp->lock);
		}
	} else {
		ret = lpfc_nvme_handle_lsreq(phba, axchg);
	}

	/* if zero, LS was successfully handled. If non-zero, LS not handled */
	if (!ret)
		return;

out_fail:
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
			"NVMe%s handler failed %d\n",
			did, sid, oxid,
			(phba->nvmet_support) ? "T" : "I", ret);

	/* recycle receive buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/* If start of new exchange, abort it */
	if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
		ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);

	/* A zero return from the abort means it owns axchg now --
	 * presumably freed on the abort completion path; verify.
	 */
	if (ret)
		kfree(axchg);
}
3064
3065/**
James Smart3772a992009-05-22 14:50:54 -04003066 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3067 * @phba: Pointer to HBA context object.
3068 * @pring: Pointer to driver SLI ring object.
3069 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3070 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3071 * @fch_type: the type for the first frame of the sequence.
3072 *
3073 * This function is called with no lock held. This function uses the r_ctl and
3074 * type of the received sequence to find the correct callback function to call
3075 * to process the sequence.
3076 **/
3077static int
3078lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3079 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3080 uint32_t fch_type)
3081{
3082 int i;
3083
James Smartf358dd02017-02-12 13:52:34 -08003084 switch (fch_type) {
3085 case FC_TYPE_NVME:
James Smart3a8070c2020-03-31 09:50:05 -07003086 lpfc_nvme_unsol_ls_handler(phba, saveq);
James Smartf358dd02017-02-12 13:52:34 -08003087 return 1;
3088 default:
3089 break;
3090 }
3091
James Smart3772a992009-05-22 14:50:54 -04003092 /* unSolicited Responses */
3093 if (pring->prt[0].profile) {
3094 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3095 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3096 saveq);
3097 return 1;
3098 }
3099 /* We must search, based on rctl / type
3100 for the right routine */
3101 for (i = 0; i < pring->num_mask; i++) {
3102 if ((pring->prt[i].rctl == fch_r_ctl) &&
3103 (pring->prt[i].type == fch_type)) {
3104 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3105 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3106 (phba, pring, saveq);
3107 return 1;
3108 }
3109 }
3110 return 0;
3111}
James Smarte59058c2008-08-24 21:49:00 -04003112
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t * irsp;
	WORD5 * w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	/* Async status iocbs go to the ring's dedicated handler, if any. */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* RET_XRI returns up to three HBQ buffers; free each one that the
	 * BDE count says is present.
	 */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/* HBQ mode: resolve buffer tags into dma buffers for the lead iocb
	 * and every chained iocb on its list.
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/* Multi-frame sequence: queue this iocb on the continuation save
	 * queue keyed by OX_ID. Only when the final frame arrives (status
	 * not INTERMED_RSP) does processing continue with the accumulated
	 * head iocb; note saveq/irsp are re-pointed at it below.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
			    saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	/* Derive the r_ctl/type pair used to pick the completion handler. */
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround: some firmware reports Rctl 0 for
		 * received sequences on the ELS ring; force ELS values.
		 */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
3284
James Smarte59058c2008-08-24 21:49:00 -04003285/**
James Smart3621a712009-04-06 18:47:14 -04003286 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
James Smarte59058c2008-08-24 21:49:00 -04003287 * @phba: Pointer to HBA context object.
3288 * @pring: Pointer to driver SLI ring object.
3289 * @prspiocb: Pointer to response iocb object.
3290 *
3291 * This function looks up the iocb_lookup table to get the command iocb
3292 * corresponding to the given response iocb using the iotag of the
James Smarte2a8be52019-05-06 17:26:47 -07003293 * response iocb. The driver calls this function with the hbalock held
3294 * for SLI3 ports or the ring lock held for SLI4 ports.
James Smarte59058c2008-08-24 21:49:00 -04003295 * This function returns the command iocb object if it finds the command
3296 * iocb else returns NULL.
3297 **/
dea31012005-04-17 16:05:31 -05003298static struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05003299lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3300 struct lpfc_sli_ring *pring,
3301 struct lpfc_iocbq *prspiocb)
dea31012005-04-17 16:05:31 -05003302{
dea31012005-04-17 16:05:31 -05003303 struct lpfc_iocbq *cmd_iocb = NULL;
3304 uint16_t iotag;
James Smarte2a8be52019-05-06 17:26:47 -07003305 spinlock_t *temp_lock = NULL;
3306 unsigned long iflag = 0;
dea31012005-04-17 16:05:31 -05003307
James Smarte2a8be52019-05-06 17:26:47 -07003308 if (phba->sli_rev == LPFC_SLI_REV4)
3309 temp_lock = &pring->ring_lock;
3310 else
3311 temp_lock = &phba->hbalock;
3312
3313 spin_lock_irqsave(temp_lock, iflag);
James Bottomley604a3e32005-10-29 10:28:33 -05003314 iotag = prspiocb->iocb.ulpIoTag;
dea31012005-04-17 16:05:31 -05003315
James Bottomley604a3e32005-10-29 10:28:33 -05003316 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3317 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart4f2e66c2012-05-09 21:17:07 -04003318 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
James Smart89533e92016-10-13 15:06:15 -07003319 /* remove from txcmpl queue list */
3320 list_del_init(&cmd_iocb->list);
James Smart4f2e66c2012-05-09 21:17:07 -04003321 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smartc4908502019-01-28 11:14:28 -08003322 pring->txcmplq_cnt--;
James Smarte2a8be52019-05-06 17:26:47 -07003323 spin_unlock_irqrestore(temp_lock, iflag);
James Smart89533e92016-10-13 15:06:15 -07003324 return cmd_iocb;
James Smart2a9bf3d2010-06-07 15:24:45 -04003325 }
dea31012005-04-17 16:05:31 -05003326 }
3327
James Smarte2a8be52019-05-06 17:26:47 -07003328 spin_unlock_irqrestore(temp_lock, iflag);
Dick Kennedy372c1872020-06-30 14:50:00 -07003329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart89533e92016-10-13 15:06:15 -07003330 "0317 iotag x%x is out of "
James Bottomley604a3e32005-10-29 10:28:33 -05003331 "range: max iotag x%x wd0 x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003332 iotag, phba->sli.last_iotag,
James Bottomley604a3e32005-10-29 10:28:33 -05003333 *(((uint32_t *) &prspiocb->iocb) + 7));
dea31012005-04-17 16:05:31 -05003334 return NULL;
3335}
3336
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag.  If the command iocb is found and is
 * still on the txcmpl queue, it is removed from that queue before being
 * returned.  The lookup table and txcmpl queue are serialized internally:
 * on SLI4 ports the per-ring lock is taken, otherwise the adapter-wide
 * hbalock is used.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	spinlock_t *temp_lock = NULL;
	unsigned long iflag = 0;

	/* SLI4 serializes on the ring lock, SLI3 on the hba lock */
	if (phba->sli_rev == LPFC_SLI_REV4)
		temp_lock = &pring->ring_lock;
	else
		temp_lock = &phba->hbalock;

	spin_lock_irqsave(temp_lock, iflag);
	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			pring->txcmplq_cnt--;
			spin_unlock_irqrestore(temp_lock, iflag);
			return cmd_iocb;
		}
	}

	/* Either the iotag was out of range or the iocb was not
	 * outstanding on the txcmpl queue - report the lookup failure.
	 */
	spin_unlock_irqrestore(temp_lock, iflag);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0372 iotag x%x lookup error: max iotag (x%x) "
			"iocb_flag x%x\n",
			iotag, phba->sli.last_iotag,
			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
	return NULL;
}
3383
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Locate the command iocb that originated this response */
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
			      CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					/* SLI3 driver-initiated abort: clear
					 * the abort flag and rewrite the
					 * response status so the completion
					 * routine sees the abort.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			/* Invoke the command's completion routine */
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
3533
James Smarte59058c2008-08-24 21:49:00 -04003534/**
James Smart3621a712009-04-06 18:47:14 -04003535 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
James Smarte59058c2008-08-24 21:49:00 -04003536 * @phba: Pointer to HBA context object.
3537 * @pring: Pointer to driver SLI ring object.
3538 *
3539 * This function is called from the iocb ring event handlers when
3540 * put pointer is ahead of the get pointer for a ring. This function signal
3541 * an error attention condition to the worker thread and the worker
3542 * thread will transition the HBA to offline state.
3543 **/
James Smart2e0fef82007-06-17 19:56:36 -05003544static void
3545lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003546{
James Smart34b02dc2008-08-24 21:49:55 -04003547 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003548 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003549 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003550 * rsp ring <portRspMax>
3551 */
Dick Kennedy372c1872020-06-30 14:50:00 -07003552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -04003553 "0312 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003554 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04003555 pring->ringno, le32_to_cpu(pgp->rspPutInx),
James Smart7e56aa22012-08-03 12:35:34 -04003556 pring->sli.sli3.numRiocb);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003557
James Smart2e0fef82007-06-17 19:56:36 -05003558 phba->link_state = LPFC_HBA_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003559
3560 /*
3561 * All error attention handlers are posted to
3562 * worker thread
3563 */
3564 phba->work_ha |= HA_ERATT;
3565 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05003566
James Smart5e9d9b82008-06-14 22:52:53 -04003567 lpfc_worker_wake_up(phba);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003568
3569 return;
3570}
3571
James Smarte59058c2008-08-24 21:49:00 -04003572/**
James Smart3621a712009-04-06 18:47:14 -04003573 * lpfc_poll_eratt - Error attention polling timer timeout handler
Lee Jones7af29d42020-07-21 17:41:31 +01003574 * @t: Context to fetch pointer to address of HBA context object from.
James Smart93996272008-08-24 21:50:30 -04003575 *
3576 * This function is invoked by the Error Attention polling timer when the
3577 * timer times out. It will check the SLI Error Attention register for
3578 * possible attention events. If so, it will post an Error Attention event
3579 * and wake up worker thread to process it. Otherwise, it will set up the
3580 * Error Attention polling timer for the next poll.
3581 **/
Kees Cookf22eb4d2017-09-06 20:24:26 -07003582void lpfc_poll_eratt(struct timer_list *t)
James Smart93996272008-08-24 21:50:30 -04003583{
3584 struct lpfc_hba *phba;
James Smarteb016562014-09-03 12:58:06 -04003585 uint32_t eratt = 0;
James Smartaa6fbb72012-08-03 12:36:03 -04003586 uint64_t sli_intr, cnt;
James Smart93996272008-08-24 21:50:30 -04003587
Kees Cookf22eb4d2017-09-06 20:24:26 -07003588 phba = from_timer(phba, t, eratt_poll);
James Smart93996272008-08-24 21:50:30 -04003589
James Smartaa6fbb72012-08-03 12:36:03 -04003590 /* Here we will also keep track of interrupts per sec of the hba */
3591 sli_intr = phba->sli.slistat.sli_intr;
3592
3593 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3594 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3595 sli_intr);
3596 else
3597 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3598
James Smart65791f12016-07-06 12:35:56 -07003599 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3600 do_div(cnt, phba->eratt_poll_interval);
James Smartaa6fbb72012-08-03 12:36:03 -04003601 phba->sli.slistat.sli_ips = cnt;
3602
3603 phba->sli.slistat.sli_prev_intr = sli_intr;
3604
James Smart93996272008-08-24 21:50:30 -04003605 /* Check chip HA register for error event */
3606 eratt = lpfc_sli_check_eratt(phba);
3607
3608 if (eratt)
3609 /* Tell the worker thread there is work to do */
3610 lpfc_worker_wake_up(phba);
3611 else
3612 /* Restart the timer for next eratt poll */
James Smart256ec0d2013-04-17 20:14:58 -04003613 mod_timer(&phba->eratt_poll,
3614 jiffies +
James Smart65791f12016-07-06 12:35:56 -07003615 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
James Smart93996272008-08-24 21:50:30 -04003616 return;
3617}
3618
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003619
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
 * LE bit set. The function will call the completion handler of the command iocb
 * if the response iocb indicates a completion for a command iocb or it is
 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	/* Single-threaded processing of the FCP ring; bail if already busy */
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_NO_RESOURCES)) {
				/* drop hbalock across the rampdown call */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			/* the lookup manages its own locking internally */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				/* completion callback runs unlocked */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* Adapter message: copy out and log it */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		/* Ack the ring-response-full attention to the chip */
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
3818
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;	/* nonzero: this routine frees the chain */
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			/* drop hbalock across the rampdown call */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				/* handler kept the iocbs; don't free them */
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
				/* hbalock is dropped across the lookup,
				 * which takes its own lock internally.
				 */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* Adapter message: copy out and log it */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			/* Release the entire chained list back to the pool */
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		/* Chain fully processed - signal the caller with NULL */
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
3994
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	/* Dispatch to the SLI-revision specific handler installed in phba */
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
4010
4011/**
4012 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4013 * @phba: Pointer to HBA context object.
4014 * @pring: Pointer to driver SLI ring object.
4015 * @mask: Host attention register mask for this ring.
4016 *
4017 * This function is called from the worker thread when there is a ring event
4018 * for non-fcp rings. The caller does not hold any lock. The function will
4019 * remove each response iocb in the response ring and calls the handle
4020 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4021 **/
4022static void
4023lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4024 struct lpfc_sli_ring *pring, uint32_t mask)
4025{
James Smart34b02dc2008-08-24 21:49:55 -04004026 struct lpfc_pgp *pgp;
dea31012005-04-17 16:05:31 -05004027 IOCB_t *entry;
4028 IOCB_t *irsp = NULL;
4029 struct lpfc_iocbq *rspiocbp = NULL;
dea31012005-04-17 16:05:31 -05004030 uint32_t portRspPut, portRspMax;
dea31012005-04-17 16:05:31 -05004031 unsigned long iflag;
James Smart3772a992009-05-22 14:50:54 -04004032 uint32_t status;
dea31012005-04-17 16:05:31 -05004033
James Smart34b02dc2008-08-24 21:49:55 -04004034 pgp = &phba->port_gp[pring->ringno];
James Smart2e0fef82007-06-17 19:56:36 -05004035 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05004036 pring->stats.iocb_event++;
4037
dea31012005-04-17 16:05:31 -05004038 /*
4039 * The next available response entry should never exceed the maximum
4040 * entries. If it does, treat it as an adapter hardware error.
4041 */
James Smart7e56aa22012-08-03 12:35:34 -04004042 portRspMax = pring->sli.sli3.numRiocb;
dea31012005-04-17 16:05:31 -05004043 portRspPut = le32_to_cpu(pgp->rspPutInx);
4044 if (portRspPut >= portRspMax) {
4045 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02004046 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea31012005-04-17 16:05:31 -05004047 * rsp ring <portRspMax>
4048 */
Dick Kennedy372c1872020-06-30 14:50:00 -07004049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -04004050 "0303 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02004051 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04004052 pring->ringno, portRspPut, portRspMax);
dea31012005-04-17 16:05:31 -05004053
James Smart2e0fef82007-06-17 19:56:36 -05004054 phba->link_state = LPFC_HBA_ERROR;
4055 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05004056
4057 phba->work_hs = HS_FFER3;
4058 lpfc_handle_eratt(phba);
4059
James Smart3772a992009-05-22 14:50:54 -04004060 return;
dea31012005-04-17 16:05:31 -05004061 }
4062
4063 rmb();
James Smart7e56aa22012-08-03 12:35:34 -04004064 while (pring->sli.sli3.rspidx != portRspPut) {
dea31012005-04-17 16:05:31 -05004065 /*
4066 * Build a completion list and call the appropriate handler.
4067 * The process is to get the next available response iocb, get
4068 * a free iocb from the list, copy the response data into the
4069 * free iocb, insert to the continuation list, and update the
4070 * next response index to slim. This process makes response
4071 * iocb's in the ring available to DMA as fast as possible but
4072 * pays a penalty for a copy operation. Since the iocb is
4073 * only 32 bytes, this penalty is considered small relative to
4074 * the PCI reads for register values and a slim write. When
4075 * the ulpLe field is set, the entire Command has been
4076 * received.
4077 */
James Smarted957682007-06-17 19:56:37 -05004078 entry = lpfc_resp_iocb(phba, pring);
4079
James Smart858c9f62007-06-17 19:56:39 -05004080 phba->last_completion_time = jiffies;
James Smart2e0fef82007-06-17 19:56:36 -05004081 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05004082 if (rspiocbp == NULL) {
4083 printk(KERN_ERR "%s: out of buffers! Failing "
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07004084 "completion.\n", __func__);
dea31012005-04-17 16:05:31 -05004085 break;
4086 }
4087
James Smarted957682007-06-17 19:56:37 -05004088 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4089 phba->iocb_rsp_size);
dea31012005-04-17 16:05:31 -05004090 irsp = &rspiocbp->iocb;
4091
James Smart7e56aa22012-08-03 12:35:34 -04004092 if (++pring->sli.sli3.rspidx >= portRspMax)
4093 pring->sli.sli3.rspidx = 0;
dea31012005-04-17 16:05:31 -05004094
James Smarta58cbd52007-08-02 11:09:43 -04004095 if (pring->ringno == LPFC_ELS_RING) {
4096 lpfc_debugfs_slow_ring_trc(phba,
4097 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4098 *(((uint32_t *) irsp) + 4),
4099 *(((uint32_t *) irsp) + 6),
4100 *(((uint32_t *) irsp) + 7));
4101 }
4102
James Smart7e56aa22012-08-03 12:35:34 -04004103 writel(pring->sli.sli3.rspidx,
4104 &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05004105
James Smart3772a992009-05-22 14:50:54 -04004106 spin_unlock_irqrestore(&phba->hbalock, iflag);
4107 /* Handle the response IOCB */
4108 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4109 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05004110
4111 /*
4112 * If the port response put pointer has not been updated, sync
4113 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
4114 * response put pointer.
4115 */
James Smart7e56aa22012-08-03 12:35:34 -04004116 if (pring->sli.sli3.rspidx == portRspPut) {
dea31012005-04-17 16:05:31 -05004117 portRspPut = le32_to_cpu(pgp->rspPutInx);
4118 }
James Smart7e56aa22012-08-03 12:35:34 -04004119 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea31012005-04-17 16:05:31 -05004120
James Smart92d7f7b2007-06-17 19:56:38 -05004121 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea31012005-04-17 16:05:31 -05004122 /* At least one response entry has been freed */
4123 pring->stats.iocb_rsp_full++;
4124 /* SET RxRE_RSP in Chip Att register */
4125 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4126 writel(status, phba->CAregaddr);
4127 readl(phba->CAregaddr); /* flush */
4128 }
4129 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4130 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4131 pring->stats.iocb_cmd_empty++;
4132
4133 /* Force update of the local copy of cmdGetInx */
James Smart7e56aa22012-08-03 12:35:34 -04004134 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05004135 lpfc_sli_resume_iocb(phba, pring);
4136
4137 if ((pring->lpfc_sli_cmd_available))
4138 (pring->lpfc_sli_cmd_available) (phba, pring);
4139
4140 }
4141
James Smart2e0fef82007-06-17 19:56:36 -05004142 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04004143 return;
dea31012005-04-17 16:05:31 -05004144}
4145
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int count = 0;		/* events processed in this invocation */

	/* Clear the pending-work flag before draining the event queue */
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		/* Dispatch on the completion queue entry code */
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			count++;
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			count++;
			break;
		default:
			/* Unrecognized CQE code: event is dropped */
			break;
		}

		/* Limit the number of events to 64 to avoid soft lockups */
		if (count == 64)
			break;
	}
}
4206
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	/* For the ELS ring, also abort fabric iocbs held at the HBA level */
	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		/* SLI4: txq is protected by the per-ring lock */
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		spin_lock_irq(&phba->hbalock);
		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
		spin_unlock_irq(&phba->hbalock);
	} else {
		/* SLI3: the global hbalock covers both queues */
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
		spin_unlock_irq(&phba->hbalock);
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
4258
James Smarte59058c2008-08-24 21:49:00 -04004259/**
James Smartdb55fba2014-04-04 13:52:02 -04004260 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4261 * @phba: Pointer to HBA context object.
James Smartdb55fba2014-04-04 13:52:02 -04004262 *
4263 * This function aborts all iocbs in FCP rings and frees all the iocb
4264 * objects in txq. This function issues an abort iocb for all the iocb commands
4265 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4266 * the return of this function. The caller is not required to hold any locks.
4267 **/
4268void
4269lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4270{
4271 struct lpfc_sli *psli = &phba->sli;
4272 struct lpfc_sli_ring *pring;
4273 uint32_t i;
4274
4275 /* Look on all the FCP Rings for the iotag */
4276 if (phba->sli_rev >= LPFC_SLI_REV4) {
James Smartcdb42be2019-01-28 11:14:21 -08004277 for (i = 0; i < phba->cfg_hdw_queue; i++) {
James Smartc00f62e2019-08-14 16:57:11 -07004278 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
James Smartdb55fba2014-04-04 13:52:02 -04004279 lpfc_sli_abort_iocb_ring(phba, pring);
4280 }
4281 } else {
James Smart895427b2017-02-12 13:52:30 -08004282 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smartdb55fba2014-04-04 13:52:02 -04004283 lpfc_sli_abort_iocb_ring(phba, pring);
4284 }
4285}
4286
James Smart895427b2017-02-12 13:52:30 -08004287/**
James Smartc00f62e2019-08-14 16:57:11 -07004288 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
James Smarta8e497d2008-08-24 21:50:11 -04004289 * @phba: Pointer to HBA context object.
4290 *
James Smartc00f62e2019-08-14 16:57:11 -07004291 * This function flushes all iocbs in the IO ring and frees all the iocb
James Smarta8e497d2008-08-24 21:50:11 -04004292 * objects in txq and txcmplq. This function will not issue abort iocbs
4293 * for all the iocb commands in txcmplq, they will just be returned with
4294 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4295 * slot has been permanently disabled.
4296 **/
4297void
James Smartc00f62e2019-08-14 16:57:11 -07004298lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
James Smarta8e497d2008-08-24 21:50:11 -04004299{
4300 LIST_HEAD(txq);
4301 LIST_HEAD(txcmplq);
James Smarta8e497d2008-08-24 21:50:11 -04004302 struct lpfc_sli *psli = &phba->sli;
4303 struct lpfc_sli_ring *pring;
James Smartdb55fba2014-04-04 13:52:02 -04004304 uint32_t i;
James Smartc1dd9112018-01-30 15:58:57 -08004305 struct lpfc_iocbq *piocb, *next_iocb;
James Smarta8e497d2008-08-24 21:50:11 -04004306
4307 spin_lock_irq(&phba->hbalock);
James Smart4cd70892020-03-22 11:12:57 -07004308 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4309 !phba->sli4_hba.hdwq) {
4310 spin_unlock_irq(&phba->hbalock);
4311 return;
4312 }
James Smart4f2e66c2012-05-09 21:17:07 -04004313 /* Indicate the I/O queues are flushed */
James Smartc00f62e2019-08-14 16:57:11 -07004314 phba->hba_flag |= HBA_IOQ_FLUSH;
James Smarta8e497d2008-08-24 21:50:11 -04004315 spin_unlock_irq(&phba->hbalock);
4316
James Smartdb55fba2014-04-04 13:52:02 -04004317 /* Look on all the FCP Rings for the iotag */
4318 if (phba->sli_rev >= LPFC_SLI_REV4) {
James Smartcdb42be2019-01-28 11:14:21 -08004319 for (i = 0; i < phba->cfg_hdw_queue; i++) {
James Smartc00f62e2019-08-14 16:57:11 -07004320 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
James Smarta8e497d2008-08-24 21:50:11 -04004321
James Smartdb55fba2014-04-04 13:52:02 -04004322 spin_lock_irq(&pring->ring_lock);
4323 /* Retrieve everything on txq */
4324 list_splice_init(&pring->txq, &txq);
James Smartc1dd9112018-01-30 15:58:57 -08004325 list_for_each_entry_safe(piocb, next_iocb,
4326 &pring->txcmplq, list)
4327 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smartdb55fba2014-04-04 13:52:02 -04004328 /* Retrieve everything on the txcmplq */
4329 list_splice_init(&pring->txcmplq, &txcmplq);
4330 pring->txq_cnt = 0;
4331 pring->txcmplq_cnt = 0;
4332 spin_unlock_irq(&pring->ring_lock);
4333
4334 /* Flush the txq */
4335 lpfc_sli_cancel_iocbs(phba, &txq,
4336 IOSTAT_LOCAL_REJECT,
4337 IOERR_SLI_DOWN);
4338 /* Flush the txcmpq */
4339 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4340 IOSTAT_LOCAL_REJECT,
4341 IOERR_SLI_DOWN);
4342 }
4343 } else {
James Smart895427b2017-02-12 13:52:30 -08004344 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smartdb55fba2014-04-04 13:52:02 -04004345
4346 spin_lock_irq(&phba->hbalock);
4347 /* Retrieve everything on txq */
4348 list_splice_init(&pring->txq, &txq);
James Smartc1dd9112018-01-30 15:58:57 -08004349 list_for_each_entry_safe(piocb, next_iocb,
4350 &pring->txcmplq, list)
4351 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smartdb55fba2014-04-04 13:52:02 -04004352 /* Retrieve everything on the txcmplq */
4353 list_splice_init(&pring->txcmplq, &txcmplq);
4354 pring->txq_cnt = 0;
4355 pring->txcmplq_cnt = 0;
4356 spin_unlock_irq(&phba->hbalock);
4357
4358 /* Flush the txq */
4359 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4360 IOERR_SLI_DOWN);
4361 /* Flush the txcmpq */
4362 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4363 IOERR_SLI_DOWN);
4364 }
James Smarta8e497d2008-08-24 21:50:11 -04004365}
4366
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when HBA fail to restart otherwise returns
 * zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;	/* register read failed: report not ready */

	phba->hba_flag |= HBA_NEEDS_CFG_PORT;

	/*
	 * Poll the status register with increasing delay: 10ms for the
	 * first 5 tries, 500ms for the next 5, then 2.5s per try, up to
	 * 20 iterations; restart the board once at iteration 15.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
4436
James Smartda0436e2009-05-22 14:51:39 -04004437/**
4438 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4439 * @phba: Pointer to HBA context object.
4440 * @mask: Bit mask to be checked.
4441 *
4442 * This function checks the host status register to check if HBA is
4443 * ready. This function will wait in a loop for the HBA to be ready
4444 * If the HBA is not ready , the function will will reset the HBA PCI
4445 * function again. The function returns 1 when HBA fail to be ready
4446 * otherwise returns zero.
4447 **/
4448static int
4449lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4450{
4451 uint32_t status;
4452 int retval = 0;
4453
4454 /* Read the HBA Host Status Register */
4455 status = lpfc_sli4_post_status_check(phba);
4456
4457 if (status) {
4458 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4459 lpfc_sli_brdrestart(phba);
4460 status = lpfc_sli4_post_status_check(phba);
4461 }
4462
4463 /* Check to see if any errors occurred during init */
4464 if (status) {
4465 phba->link_state = LPFC_HBA_ERROR;
4466 retval = 1;
4467 } else
4468 phba->sli4_hba.intr_enable = 0;
4469
4470 return retval;
4471}
4472
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 * Returns whatever the underlying routine returns (zero when ready,
 * 1 on failure).
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	/* Dispatch to lpfc_sli_brdready_s3 or _s4 via the jump table */
	return phba->lpfc_sli_brdready(phba, mask);
}
4486
James Smart92908312006-03-07 15:04:13 -05004487#define BARRIER_TEST_PATTERN (0xdeadbeef)
4488
/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This function is called
 * with hbalock held and requests HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int i;
	uint8_t hdrtype;

	lockdep_assert_held(&phba->hbalock);

	/* Barrier applies only to single-function (header type 0x80)
	 * Helios/Thor adapters.
	 */
	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	/* Build a chip-owned KILL_BOARD mailbox command word */
	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	/* Seed the response word with a test pattern, then post to SLIM */
	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Wait up to ~50ms for the chip to overwrite the test pattern */
	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	resp_data = 0;
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data != ~(BARRIER_TEST_PATTERN)) {
		/* Chip never acknowledged; skip the host-ownership step */
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Hand ownership back to the host and wait for the command echo */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:

	/* Wait for the error attention bit to assert */
	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* Acknowledge the error attention; mark the port stopped */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	/* Stop ignoring ERATT and restore the saved host control register */
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
4593
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command and waits for
 * the error attention interrupt. This function is called for stopping
 * the firmware processing. The caller is not required to hold any
 * locks. This function calls lpfc_hba_down_post function to free
 * any pending commands after the kill. The function will return 1 when it
 * fails to kill the board else will return 0.
 **/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;	/* no mailbox memory: cannot kill */

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	/* Issue KILL_BOARD without waiting for completion */
	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		/* On MBX_BUSY the mailbox stays queued; don't free it */
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		/* Error attention seen: acknowledge it, mark port stopped */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	/* 0 if ERATT confirmed the kill, 1 if the board never signaled */
	return ha_copy & HA_ERATT ? 0 : 1;
}
4690
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the control
 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * Returns 0 on success, -EIO if the PCI command register cannot be read.
 * The caller is not required to hold any locks.
 **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
	if (phba->pport) {
		phba->pport->fc_myDID = 0;
		phba->pport->fc_prevDID = 0;
	}

	/* Turn off parity checking and serr during the physical reset */
	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
		return -EIO;	/* config-space read failed */

	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info: clear all SLI3 ring indices */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
4761
James Smarte59058c2008-08-24 21:49:00 -04004762/**
James Smartda0436e2009-05-22 14:51:39 -04004763 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4764 * @phba: Pointer to HBA context object.
4765 *
4766 * This function resets a SLI4 HBA. This function disables PCI layer parity
4767 * checking during resets the device. The caller is not required to hold
4768 * any locks.
4769 *
James Smart8c24a4f2019-08-14 16:56:53 -07004770 * This function returns 0 on success else returns negative error code.
James Smartda0436e2009-05-22 14:51:39 -04004771 **/
4772int
4773lpfc_sli4_brdreset(struct lpfc_hba *phba)
4774{
4775 struct lpfc_sli *psli = &phba->sli;
4776 uint16_t cfg_value;
James Smart02936352014-04-04 13:52:12 -04004777 int rc = 0;
James Smartda0436e2009-05-22 14:51:39 -04004778
4779 /* Reset HBA */
4780 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart02936352014-04-04 13:52:12 -04004781 "0295 Reset HBA Data: x%x x%x x%x\n",
4782 phba->pport->port_state, psli->sli_flag,
4783 phba->hba_flag);
James Smartda0436e2009-05-22 14:51:39 -04004784
4785 /* perform board reset */
4786 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04004787 phba->link_events = 0;
James Smartda0436e2009-05-22 14:51:39 -04004788 phba->pport->fc_myDID = 0;
4789 phba->pport->fc_prevDID = 0;
4790
James Smartda0436e2009-05-22 14:51:39 -04004791 spin_lock_irq(&phba->hbalock);
4792 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4793 phba->fcf.fcf_flag = 0;
James Smartda0436e2009-05-22 14:51:39 -04004794 spin_unlock_irq(&phba->hbalock);
4795
James Smart02936352014-04-04 13:52:12 -04004796 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4797 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4798 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4799 return rc;
4800 }
4801
James Smartda0436e2009-05-22 14:51:39 -04004802 /* Now physically reset the device */
4803 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4804 "0389 Performing PCI function reset!\n");
James Smartbe858b62010-12-15 17:57:20 -05004805
4806 /* Turn off parity checking and serr during the physical reset */
James Smart32a93102019-03-12 16:30:13 -07004807 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4808 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4809 "3205 PCI read Config failed\n");
4810 return -EIO;
4811 }
4812
James Smartbe858b62010-12-15 17:57:20 -05004813 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4814 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4815
James Smart88318812012-09-29 11:29:29 -04004816 /* Perform FCoE PCI function reset before freeing queue memory */
James Smart27b01b82012-05-09 21:19:44 -04004817 rc = lpfc_pci_function_reset(phba);
James Smartda0436e2009-05-22 14:51:39 -04004818
James Smartbe858b62010-12-15 17:57:20 -05004819 /* Restore PCI cmd register */
4820 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4821
James Smart27b01b82012-05-09 21:19:44 -04004822 return rc;
James Smartda0436e2009-05-22 14:51:39 -04004823}
4824
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA. The caller is not required to hold any lock.
 * This function writes MBX_RESTART mailbox command to the SLIM and
 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
 * function to free any pending commands. The function enables
 * POST only during the first initialization. The function returns zero.
 * The function does not guarantee completion of MBX_RESTART mailbox
 * command before the return of this function.
 **/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* Build a one-word MBX_RESTART mailbox command on the stack */
	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	/* Write mailbox word 0 (the command) to the SLIM */
	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->pport && phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	if (phba->pport)
		phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Zero the link statistics offsets and restart the stats clock */
	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
4901
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 *
 * Return: 0 on success, else the negative error code returned by
 * lpfc_sli4_brdreset().
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);
	if (rc) {
		/* Reset failed: mark the HBA in error but still run the
		 * down-post cleanup below.
		 */
		phba->link_state = LPFC_HBA_ERROR;
		goto hba_down_queue;
	}

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Zero the link statistics offsets and restart the stats clock */
	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

hba_down_queue:
	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}
4951
4952/**
4953 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4954 * @phba: Pointer to HBA context object.
4955 *
4956 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4957 * API jump table function pointer from the lpfc_hba struct.
4958**/
4959int
4960lpfc_sli_brdrestart(struct lpfc_hba *phba)
4961{
4962 return phba->lpfc_sli_brdrestart(phba);
4963}
4964
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. The poll interval grows from 10ms to 1sec;
 * if the HBA is still not ready at iteration 150 the board is restarted
 * once more, and polling gives up after 200 iterations (roughly a minute,
 * the time Falcon FIPS zeroization may need to complete). The function
 * returns zero if HBA successfully restarted else returns negative
 * error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before reset the board again and check every
		 * 1 sec for 50 retries. The up to 60 seconds before the
		 * board ready is required by the Falcon FIPS zeroization
		 * complete, and any reset the board in between shall cause
		 * restart of zeroization, further delay the board ready.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		/* Back off: 10ms for the first 10 polls, then 100ms, then 1s */
		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* A CONFIG_PORT mailbox command is required after every reset */
	phba->hba_flag |= HBA_NEEDS_CFG_PORT;

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
5066
James Smarte59058c2008-08-24 21:49:00 -04005067/**
James Smart3621a712009-04-06 18:47:14 -04005068 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
James Smarte59058c2008-08-24 21:49:00 -04005069 *
5070 * This function calculates and returns the number of HBQs required to be
5071 * configured.
5072 **/
James Smart78b2d852007-08-02 11:10:21 -04005073int
James Smarted957682007-06-17 19:56:37 -05005074lpfc_sli_hbq_count(void)
5075{
James Smart92d7f7b2007-06-17 19:56:38 -05005076 return ARRAY_SIZE(lpfc_hbq_defs);
James Smarted957682007-06-17 19:56:37 -05005077}
5078
James Smarte59058c2008-08-24 21:49:00 -04005079/**
James Smart3621a712009-04-06 18:47:14 -04005080 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
James Smarte59058c2008-08-24 21:49:00 -04005081 *
5082 * This function adds the number of hbq entries in every HBQ to get
5083 * the total number of hbq entries required for the HBA and returns
5084 * the total count.
5085 **/
James Smarted957682007-06-17 19:56:37 -05005086static int
5087lpfc_sli_hbq_entry_count(void)
5088{
5089 int hbq_count = lpfc_sli_hbq_count();
5090 int count = 0;
5091 int i;
5092
5093 for (i = 0; i < hbq_count; ++i)
James Smart92d7f7b2007-06-17 19:56:38 -05005094 count += lpfc_hbq_defs[i]->entry_count;
James Smarted957682007-06-17 19:56:37 -05005095 return count;
5096}
5097
James Smarte59058c2008-08-24 21:49:00 -04005098/**
James Smart3621a712009-04-06 18:47:14 -04005099 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
James Smarte59058c2008-08-24 21:49:00 -04005100 *
5101 * This function calculates amount of memory required for all hbq entries
5102 * to be configured and returns the total memory required.
5103 **/
dea31012005-04-17 16:05:31 -05005104int
James Smarted957682007-06-17 19:56:37 -05005105lpfc_sli_hbq_size(void)
5106{
5107 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5108}
5109
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. Each HBQ is configured with
 * a CONFIG_HBQ mailbox command issued in polling mode. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int  hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	/* hbq_entry_index tracks where each HBQ starts in the shared
	 * entry area; it advances by each HBQ's entry_count.
	 */
	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
5178
James Smarte59058c2008-08-24 21:49:00 -04005179/**
James Smart4f774512009-05-22 14:52:35 -04005180 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5181 * @phba: Pointer to HBA context object.
5182 *
5183 * This function is called during the SLI initialization to configure
5184 * all the HBQs and post buffers to the HBQ. The caller is not
5185 * required to hold any locks. This function will return zero if successful
5186 * else it will return negative error code.
5187 **/
5188static int
5189lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5190{
5191 phba->hbq_in_use = 1;
James Smart999fbbc2019-12-18 15:58:06 -08005192 /**
5193 * Specific case when the MDS diagnostics is enabled and supported.
5194 * The receive buffer count is truncated to manage the incoming
5195 * traffic.
5196 **/
5197 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5198 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5199 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5200 else
5201 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5202 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
James Smart4f774512009-05-22 14:52:35 -04005203 phba->hbq_count = 1;
James Smart895427b2017-02-12 13:52:30 -08005204 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
James Smart4f774512009-05-22 14:52:35 -04005205 /* Initially populate or replenish the HBQs */
James Smart4f774512009-05-22 14:52:35 -04005206 return 0;
5207}
5208
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The CONFIG_PORT attempt is retried at most twice; on
 * success the granted SLI3 feature bits (NPIV, HBQ, CRP, BlockGuard)
 * are recorded in phba->sli3_options. The caller is not required to
 * hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	/* Restart/CONFIG_PORT loop: at most two board resets */
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		/* Clear feature bits; they are re-granted from the mailbox
		 * response below.
		 */
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		/* NPIV granted: take the firmware's max_vpi */
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;

		} else
			phba->max_vpi = 0;
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		/* SLI-2: no HBQs, no vports */
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
5334
James Smarte59058c2008-08-24 21:49:00 -04005335
/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int i;
	int longs;

	/* Enable ISR already does config_port because of config_msi mbx */
	if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
		rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (rc)
			return -EIO;
		phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
	}
	phba->fcp_embed_io = 0;	/* SLI4 FC support only */

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			/* AER not usable; disable it for this HBA */
			phba->cfg_aer_support = 0;
		}
	}

	/* IOCB sizes depend on the SLI revision granted by CONFIG_PORT */
	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kcalloc(longs,
						  sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
						sizeof(uint16_t),
						GFP_KERNEL);
			if (!phba->vpi_ids) {
				/* Undo the bitmask allocation above */
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	/* Issue init_link and kick off discovery */
	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0445 Firmware initialization failed\n");
	return rc;
}
5453
James Smartda0436e2009-05-22 14:51:39 -04005454/**
5455 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5456 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +01005457 *
James Smartda0436e2009-05-22 14:51:39 -04005458 * This function issue a dump mailbox command to read config region
5459 * 23 and parse the records in the region and populate driver
5460 * data structure.
5461 **/
5462static int
James Smartff78d8f2011-12-13 13:21:35 -05005463lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
James Smartda0436e2009-05-22 14:51:39 -04005464{
James Smartff78d8f2011-12-13 13:21:35 -05005465 LPFC_MBOXQ_t *mboxq;
James Smartda0436e2009-05-22 14:51:39 -04005466 struct lpfc_dmabuf *mp;
5467 struct lpfc_mqe *mqe;
5468 uint32_t data_length;
5469 int rc;
5470
5471 /* Program the default value of vlan_id and fc_map */
5472 phba->valid_vlan = 0;
5473 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5474 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5475 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5476
James Smartff78d8f2011-12-13 13:21:35 -05005477 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5478 if (!mboxq)
James Smartda0436e2009-05-22 14:51:39 -04005479 return -ENOMEM;
5480
James Smartff78d8f2011-12-13 13:21:35 -05005481 mqe = &mboxq->u.mqe;
5482 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5483 rc = -ENOMEM;
5484 goto out_free_mboxq;
5485 }
5486
James Smart3e1f0712018-11-29 16:09:29 -08005487 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
James Smartda0436e2009-05-22 14:51:39 -04005488 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5489
5490 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5491 "(%d):2571 Mailbox cmd x%x Status x%x "
5492 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5493 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5494 "CQ: x%x x%x x%x x%x\n",
5495 mboxq->vport ? mboxq->vport->vpi : 0,
5496 bf_get(lpfc_mqe_command, mqe),
5497 bf_get(lpfc_mqe_status, mqe),
5498 mqe->un.mb_words[0], mqe->un.mb_words[1],
5499 mqe->un.mb_words[2], mqe->un.mb_words[3],
5500 mqe->un.mb_words[4], mqe->un.mb_words[5],
5501 mqe->un.mb_words[6], mqe->un.mb_words[7],
5502 mqe->un.mb_words[8], mqe->un.mb_words[9],
5503 mqe->un.mb_words[10], mqe->un.mb_words[11],
5504 mqe->un.mb_words[12], mqe->un.mb_words[13],
5505 mqe->un.mb_words[14], mqe->un.mb_words[15],
5506 mqe->un.mb_words[16], mqe->un.mb_words[50],
5507 mboxq->mcqe.word0,
5508 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5509 mboxq->mcqe.trailer);
5510
5511 if (rc) {
5512 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5513 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005514 rc = -EIO;
5515 goto out_free_mboxq;
James Smartda0436e2009-05-22 14:51:39 -04005516 }
5517 data_length = mqe->un.mb_words[5];
James Smarta0c87cb2009-07-19 10:01:10 -04005518 if (data_length > DMP_RGN23_SIZE) {
James Smartd11e31d2009-06-10 17:23:06 -04005519 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5520 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005521 rc = -EIO;
5522 goto out_free_mboxq;
James Smartd11e31d2009-06-10 17:23:06 -04005523 }
James Smartda0436e2009-05-22 14:51:39 -04005524
5525 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5526 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5527 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005528 rc = 0;
5529
5530out_free_mboxq:
5531 mempool_free(mboxq, phba->mbox_mem_pool);
5532 return rc;
James Smartda0436e2009-05-22 14:51:39 -04005533}
5534
5535/**
5536 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5537 * @phba: pointer to lpfc hba data structure.
5538 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5539 * @vpd: pointer to the memory to hold resulting port vpd data.
5540 * @vpd_size: On input, the number of bytes allocated to @vpd.
5541 * On output, the number of data bytes in @vpd.
5542 *
5543 * This routine executes a READ_REV SLI4 mailbox command. In
5544 * addition, this routine gets the port vpd data.
5545 *
5546 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02005547 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -04005548 * -ENOMEM - could not allocated memory.
James Smartda0436e2009-05-22 14:51:39 -04005549 **/
5550static int
5551lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5552 uint8_t *vpd, uint32_t *vpd_size)
5553{
5554 int rc = 0;
5555 uint32_t dma_size;
5556 struct lpfc_dmabuf *dmabuf;
5557 struct lpfc_mqe *mqe;
5558
5559 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5560 if (!dmabuf)
5561 return -ENOMEM;
5562
5563 /*
5564 * Get a DMA buffer for the vpd data resulting from the READ_REV
5565 * mailbox command.
5566 */
5567 dma_size = *vpd_size;
Luis Chamberlain750afb02019-01-04 09:23:09 +01005568 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5569 &dmabuf->phys, GFP_KERNEL);
James Smartda0436e2009-05-22 14:51:39 -04005570 if (!dmabuf->virt) {
5571 kfree(dmabuf);
5572 return -ENOMEM;
5573 }
James Smartda0436e2009-05-22 14:51:39 -04005574
5575 /*
5576 * The SLI4 implementation of READ_REV conflicts at word1,
5577 * bits 31:16 and SLI4 adds vpd functionality not present
5578 * in SLI3. This code corrects the conflicts.
5579 */
5580 lpfc_read_rev(phba, mboxq);
5581 mqe = &mboxq->u.mqe;
5582 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5583 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5584 mqe->un.read_rev.word1 &= 0x0000FFFF;
5585 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5586 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5587
5588 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5589 if (rc) {
5590 dma_free_coherent(&phba->pcidev->dev, dma_size,
5591 dmabuf->virt, dmabuf->phys);
James Smartdef9c7a2009-12-21 17:02:28 -05005592 kfree(dmabuf);
James Smartda0436e2009-05-22 14:51:39 -04005593 return -EIO;
5594 }
5595
James Smartda0436e2009-05-22 14:51:39 -04005596 /*
5597 * The available vpd length cannot be bigger than the
5598 * DMA buffer passed to the port. Catch the less than
5599 * case and update the caller's size.
5600 */
5601 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5602 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5603
James Smartd7c47992010-06-08 18:31:54 -04005604 memcpy(vpd, dmabuf->virt, *vpd_size);
5605
James Smartda0436e2009-05-22 14:51:39 -04005606 dma_free_coherent(&phba->pcidev->dev, dma_size,
5607 dmabuf->virt, dmabuf->phys);
5608 kfree(dmabuf);
5609 return 0;
5610}
5611
5612/**
James Smartb3b4f3e2019-03-12 16:30:23 -07005613 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
James Smartcd1c8302011-10-10 21:33:25 -04005614 * @phba: pointer to lpfc hba data structure.
5615 *
5616 * This routine retrieves SLI4 device physical port name this PCI function
5617 * is attached to.
5618 *
5619 * Return codes
Anatol Pomozov4907cb72012-09-01 10:31:09 -07005620 * 0 - successful
James Smartb3b4f3e2019-03-12 16:30:23 -07005621 * otherwise - failed to retrieve controller attributes
James Smartcd1c8302011-10-10 21:33:25 -04005622 **/
5623static int
James Smartb3b4f3e2019-03-12 16:30:23 -07005624lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
James Smartcd1c8302011-10-10 21:33:25 -04005625{
5626 LPFC_MBOXQ_t *mboxq;
James Smartcd1c8302011-10-10 21:33:25 -04005627 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5628 struct lpfc_controller_attribute *cntl_attr;
James Smartcd1c8302011-10-10 21:33:25 -04005629 void *virtaddr = NULL;
5630 uint32_t alloclen, reqlen;
5631 uint32_t shdr_status, shdr_add_status;
5632 union lpfc_sli4_cfg_shdr *shdr;
James Smartcd1c8302011-10-10 21:33:25 -04005633 int rc;
5634
James Smartcd1c8302011-10-10 21:33:25 -04005635 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5636 if (!mboxq)
5637 return -ENOMEM;
James Smartcd1c8302011-10-10 21:33:25 -04005638
James Smartb3b4f3e2019-03-12 16:30:23 -07005639 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
James Smartcd1c8302011-10-10 21:33:25 -04005640 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5641 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5642 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5643 LPFC_SLI4_MBX_NEMBED);
James Smartb3b4f3e2019-03-12 16:30:23 -07005644
James Smartcd1c8302011-10-10 21:33:25 -04005645 if (alloclen < reqlen) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartcd1c8302011-10-10 21:33:25 -04005647 "3084 Allocated DMA memory size (%d) is "
5648 "less than the requested DMA memory size "
5649 "(%d)\n", alloclen, reqlen);
5650 rc = -ENOMEM;
5651 goto out_free_mboxq;
5652 }
5653 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5654 virtaddr = mboxq->sge_array->addr[0];
5655 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5656 shdr = &mbx_cntl_attr->cfg_shdr;
5657 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5658 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5659 if (shdr_status || shdr_add_status || rc) {
5660 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5661 "3085 Mailbox x%x (x%x/x%x) failed, "
5662 "rc:x%x, status:x%x, add_status:x%x\n",
5663 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5664 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5665 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5666 rc, shdr_status, shdr_add_status);
5667 rc = -ENXIO;
5668 goto out_free_mboxq;
5669 }
James Smartb3b4f3e2019-03-12 16:30:23 -07005670
James Smartcd1c8302011-10-10 21:33:25 -04005671 cntl_attr = &mbx_cntl_attr->cntl_attr;
5672 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5673 phba->sli4_hba.lnk_info.lnk_tp =
5674 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5675 phba->sli4_hba.lnk_info.lnk_no =
5676 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
James Smart16a93e82021-07-07 11:43:34 -07005677 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
5678 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
James Smartb3b4f3e2019-03-12 16:30:23 -07005679
5680 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5681 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5682 sizeof(phba->BIOSVersion));
5683
James Smartcd1c8302011-10-10 21:33:25 -04005684 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart16a93e82021-07-07 11:43:34 -07005685 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
5686 "flash_id: x%02x, asic_rev: x%02x\n",
James Smartcd1c8302011-10-10 21:33:25 -04005687 phba->sli4_hba.lnk_info.lnk_tp,
James Smartb3b4f3e2019-03-12 16:30:23 -07005688 phba->sli4_hba.lnk_info.lnk_no,
James Smart16a93e82021-07-07 11:43:34 -07005689 phba->BIOSVersion, phba->sli4_hba.flash_id,
5690 phba->sli4_hba.asic_rev);
James Smartb3b4f3e2019-03-12 16:30:23 -07005691out_free_mboxq:
James Smart304ee432021-04-11 18:31:17 -07005692 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5693 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5694 else
5695 mempool_free(mboxq, phba->mbox_mem_pool);
James Smartb3b4f3e2019-03-12 16:30:23 -07005696 return rc;
5697}
5698
5699/**
5700 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5701 * @phba: pointer to lpfc hba data structure.
5702 *
5703 * This routine retrieves SLI4 device physical port name this PCI function
5704 * is attached to.
5705 *
5706 * Return codes
5707 * 0 - successful
5708 * otherwise - failed to retrieve physical port name
5709 **/
5710static int
5711lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5712{
5713 LPFC_MBOXQ_t *mboxq;
5714 struct lpfc_mbx_get_port_name *get_port_name;
5715 uint32_t shdr_status, shdr_add_status;
5716 union lpfc_sli4_cfg_shdr *shdr;
5717 char cport_name = 0;
5718 int rc;
5719
5720 /* We assume nothing at this point */
5721 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5722 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5723
5724 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5725 if (!mboxq)
5726 return -ENOMEM;
5727 /* obtain link type and link number via READ_CONFIG */
5728 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5729 lpfc_sli4_read_config(phba);
5730 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5731 goto retrieve_ppname;
5732
5733 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5734 rc = lpfc_sli4_get_ctl_attr(phba);
5735 if (rc)
5736 goto out_free_mboxq;
James Smartcd1c8302011-10-10 21:33:25 -04005737
5738retrieve_ppname:
5739 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5740 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5741 sizeof(struct lpfc_mbx_get_port_name) -
5742 sizeof(struct lpfc_sli4_cfg_mhdr),
5743 LPFC_SLI4_MBX_EMBED);
5744 get_port_name = &mboxq->u.mqe.un.get_port_name;
5745 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5746 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5747 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5748 phba->sli4_hba.lnk_info.lnk_tp);
5749 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5750 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5751 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5752 if (shdr_status || shdr_add_status || rc) {
5753 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5754 "3087 Mailbox x%x (x%x/x%x) failed: "
5755 "rc:x%x, status:x%x, add_status:x%x\n",
5756 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5757 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5758 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5759 rc, shdr_status, shdr_add_status);
5760 rc = -ENXIO;
5761 goto out_free_mboxq;
5762 }
5763 switch (phba->sli4_hba.lnk_info.lnk_no) {
5764 case LPFC_LINK_NUMBER_0:
5765 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5766 &get_port_name->u.response);
5767 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5768 break;
5769 case LPFC_LINK_NUMBER_1:
5770 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5771 &get_port_name->u.response);
5772 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5773 break;
5774 case LPFC_LINK_NUMBER_2:
5775 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5776 &get_port_name->u.response);
5777 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5778 break;
5779 case LPFC_LINK_NUMBER_3:
5780 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5781 &get_port_name->u.response);
5782 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5783 break;
5784 default:
5785 break;
5786 }
5787
5788 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5789 phba->Port[0] = cport_name;
5790 phba->Port[1] = '\0';
5791 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5792 "3091 SLI get port name: %s\n", phba->Port);
5793 }
5794
5795out_free_mboxq:
James Smart304ee432021-04-11 18:31:17 -07005796 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5797 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5798 else
5799 mempool_free(mboxq, phba->mbox_mem_pool);
James Smartcd1c8302011-10-10 21:33:25 -04005800 return rc;
5801}
5802
5803/**
James Smartda0436e2009-05-22 14:51:39 -04005804 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5805 * @phba: pointer to lpfc hba data structure.
5806 *
5807 * This routine is called to explicitly arm the SLI4 device's completion and
5808 * event queues
5809 **/
5810static void
5811lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5812{
James Smart895427b2017-02-12 13:52:30 -08005813 int qidx;
James Smartb71413d2018-02-22 08:18:40 -08005814 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
James Smartcdb42be2019-01-28 11:14:21 -08005815 struct lpfc_sli4_hdw_queue *qp;
James Smart657add42019-05-21 17:49:06 -07005816 struct lpfc_queue *eq;
James Smartda0436e2009-05-22 14:51:39 -04005817
James Smart32517fc2019-01-28 11:14:33 -08005818 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5819 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
James Smartb71413d2018-02-22 08:18:40 -08005820 if (sli4_hba->nvmels_cq)
James Smart32517fc2019-01-28 11:14:33 -08005821 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5822 LPFC_QUEUE_REARM);
James Smart895427b2017-02-12 13:52:30 -08005823
James Smartcdb42be2019-01-28 11:14:21 -08005824 if (sli4_hba->hdwq) {
James Smart657add42019-05-21 17:49:06 -07005825 /* Loop thru all Hardware Queues */
James Smartcdb42be2019-01-28 11:14:21 -08005826 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
James Smart657add42019-05-21 17:49:06 -07005827 qp = &sli4_hba->hdwq[qidx];
5828 /* ARM the corresponding CQ */
James Smart01f2ef62019-08-28 16:19:11 -07005829 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
James Smartc00f62e2019-08-14 16:57:11 -07005830 LPFC_QUEUE_REARM);
James Smartcdb42be2019-01-28 11:14:21 -08005831 }
James Smart895427b2017-02-12 13:52:30 -08005832
James Smart657add42019-05-21 17:49:06 -07005833 /* Loop thru all IRQ vectors */
5834 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5835 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5836 /* ARM the corresponding EQ */
5837 sli4_hba->sli4_write_eq_db(phba, eq,
5838 0, LPFC_QUEUE_REARM);
5839 }
James Smartcdb42be2019-01-28 11:14:21 -08005840 }
James Smart1ba981f2014-02-20 09:56:45 -05005841
James Smart2d7dbc42017-02-12 13:52:35 -08005842 if (phba->nvmet_support) {
5843 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
James Smart32517fc2019-01-28 11:14:33 -08005844 sli4_hba->sli4_write_cq_db(phba,
5845 sli4_hba->nvmet_cqset[qidx], 0,
James Smart2d7dbc42017-02-12 13:52:35 -08005846 LPFC_QUEUE_REARM);
5847 }
James Smart2e90f4b2011-12-13 13:22:37 -05005848 }
James Smartda0436e2009-05-22 14:51:39 -04005849}
5850
5851/**
James Smart6d368e52011-05-24 11:44:12 -04005852 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5853 * @phba: Pointer to HBA context object.
5854 * @type: The resource extent type.
James Smartb76f2dc2011-07-22 18:37:42 -04005855 * @extnt_count: buffer to hold port available extent count.
5856 * @extnt_size: buffer to hold element count per extent.
James Smart6d368e52011-05-24 11:44:12 -04005857 *
James Smartb76f2dc2011-07-22 18:37:42 -04005858 * This function calls the port and retrievs the number of available
5859 * extents and their size for a particular extent type.
5860 *
5861 * Returns: 0 if successful. Nonzero otherwise.
James Smart6d368e52011-05-24 11:44:12 -04005862 **/
James Smartb76f2dc2011-07-22 18:37:42 -04005863int
James Smart6d368e52011-05-24 11:44:12 -04005864lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5865 uint16_t *extnt_count, uint16_t *extnt_size)
5866{
5867 int rc = 0;
5868 uint32_t length;
5869 uint32_t mbox_tmo;
5870 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5871 LPFC_MBOXQ_t *mbox;
5872
5873 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5874 if (!mbox)
5875 return -ENOMEM;
5876
5877 /* Find out how many extents are available for this resource type */
5878 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5879 sizeof(struct lpfc_sli4_cfg_mhdr));
5880 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5881 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5882 length, LPFC_SLI4_MBX_EMBED);
5883
5884 /* Send an extents count of 0 - the GET doesn't use it. */
5885 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5886 LPFC_SLI4_MBX_EMBED);
5887 if (unlikely(rc)) {
5888 rc = -EIO;
5889 goto err_exit;
5890 }
5891
5892 if (!phba->sli4_hba.intr_enable)
5893 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5894 else {
James Smarta183a152011-10-10 21:32:43 -04005895 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005896 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5897 }
5898 if (unlikely(rc)) {
5899 rc = -EIO;
5900 goto err_exit;
5901 }
5902
5903 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5904 if (bf_get(lpfc_mbox_hdr_status,
5905 &rsrc_info->header.cfg_shdr.response)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6d368e52011-05-24 11:44:12 -04005907 "2930 Failed to get resource extents "
5908 "Status 0x%x Add'l Status 0x%x\n",
5909 bf_get(lpfc_mbox_hdr_status,
5910 &rsrc_info->header.cfg_shdr.response),
5911 bf_get(lpfc_mbox_hdr_add_status,
5912 &rsrc_info->header.cfg_shdr.response));
5913 rc = -EIO;
5914 goto err_exit;
5915 }
5916
5917 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5918 &rsrc_info->u.rsp);
5919 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5920 &rsrc_info->u.rsp);
James Smart8a9d2e82012-05-09 21:16:12 -04005921
5922 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5923 "3162 Retrieved extents type-%d from port: count:%d, "
5924 "size:%d\n", type, *extnt_count, *extnt_size);
5925
5926err_exit:
James Smart6d368e52011-05-24 11:44:12 -04005927 mempool_free(mbox, phba->mbox_mem_pool);
5928 return rc;
5929}
5930
5931/**
5932 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5933 * @phba: Pointer to HBA context object.
5934 * @type: The extent type to check.
5935 *
5936 * This function reads the current available extents from the port and checks
5937 * if the extent count or extent size has changed since the last access.
5938 * Callers use this routine post port reset to understand if there is a
5939 * extent reprovisioning requirement.
5940 *
5941 * Returns:
5942 * -Error: error indicates problem.
5943 * 1: Extent count or size has changed.
5944 * 0: No changes.
5945 **/
5946static int
5947lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5948{
5949 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5950 uint16_t size_diff, rsrc_ext_size;
5951 int rc = 0;
5952 struct lpfc_rsrc_blks *rsrc_entry;
5953 struct list_head *rsrc_blk_list = NULL;
5954
5955 size_diff = 0;
5956 curr_ext_cnt = 0;
5957 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5958 &rsrc_ext_cnt,
5959 &rsrc_ext_size);
5960 if (unlikely(rc))
5961 return -EIO;
5962
5963 switch (type) {
5964 case LPFC_RSC_TYPE_FCOE_RPI:
5965 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5966 break;
5967 case LPFC_RSC_TYPE_FCOE_VPI:
5968 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5969 break;
5970 case LPFC_RSC_TYPE_FCOE_XRI:
5971 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5972 break;
5973 case LPFC_RSC_TYPE_FCOE_VFI:
5974 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5975 break;
5976 default:
5977 break;
5978 }
5979
5980 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5981 curr_ext_cnt++;
5982 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5983 size_diff++;
5984 }
5985
5986 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5987 rc = 1;
5988
5989 return rc;
5990}
5991
5992/**
5993 * lpfc_sli4_cfg_post_extnts -
5994 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +01005995 * @extnt_cnt: number of available extents.
5996 * @type: the extent type (rpi, xri, vfi, vpi).
5997 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5998 * @mbox: pointer to the caller's allocated mailbox structure.
James Smart6d368e52011-05-24 11:44:12 -04005999 *
6000 * This function executes the extents allocation request. It also
6001 * takes care of the amount of memory needed to allocate or get the
6002 * allocated extents. It is the caller's responsibility to evaluate
6003 * the response.
6004 *
6005 * Returns:
6006 * -Error: Error value describes the condition found.
6007 * 0: if successful
6008 **/
6009static int
James Smart8a9d2e82012-05-09 21:16:12 -04006010lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
James Smart6d368e52011-05-24 11:44:12 -04006011 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6012{
6013 int rc = 0;
6014 uint32_t req_len;
6015 uint32_t emb_len;
6016 uint32_t alloc_len, mbox_tmo;
6017
6018 /* Calculate the total requested length of the dma memory */
James Smart8a9d2e82012-05-09 21:16:12 -04006019 req_len = extnt_cnt * sizeof(uint16_t);
James Smart6d368e52011-05-24 11:44:12 -04006020
6021 /*
6022 * Calculate the size of an embedded mailbox. The uint32_t
6023 * accounts for extents-specific word.
6024 */
6025 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6026 sizeof(uint32_t);
6027
6028 /*
6029 * Presume the allocation and response will fit into an embedded
6030 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6031 */
6032 *emb = LPFC_SLI4_MBX_EMBED;
6033 if (req_len > emb_len) {
James Smart8a9d2e82012-05-09 21:16:12 -04006034 req_len = extnt_cnt * sizeof(uint16_t) +
James Smart6d368e52011-05-24 11:44:12 -04006035 sizeof(union lpfc_sli4_cfg_shdr) +
6036 sizeof(uint32_t);
6037 *emb = LPFC_SLI4_MBX_NEMBED;
6038 }
6039
6040 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6041 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6042 req_len, *emb);
6043 if (alloc_len < req_len) {
Dick Kennedy372c1872020-06-30 14:50:00 -07006044 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartb76f2dc2011-07-22 18:37:42 -04006045 "2982 Allocated DMA memory size (x%x) is "
James Smart6d368e52011-05-24 11:44:12 -04006046 "less than the requested DMA memory "
6047 "size (x%x)\n", alloc_len, req_len);
6048 return -ENOMEM;
6049 }
James Smart8a9d2e82012-05-09 21:16:12 -04006050 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
James Smart6d368e52011-05-24 11:44:12 -04006051 if (unlikely(rc))
6052 return -EIO;
6053
6054 if (!phba->sli4_hba.intr_enable)
6055 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6056 else {
James Smarta183a152011-10-10 21:32:43 -04006057 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04006058 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6059 }
6060
6061 if (unlikely(rc))
6062 rc = -EIO;
6063 return rc;
6064}
6065
6066/**
6067 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6068 * @phba: Pointer to HBA context object.
6069 * @type: The resource extent type to allocate.
6070 *
6071 * This function allocates the number of elements for the specified
6072 * resource type.
6073 **/
6074static int
6075lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6076{
6077 bool emb = false;
6078 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6079 uint16_t rsrc_id, rsrc_start, j, k;
6080 uint16_t *ids;
6081 int i, rc;
6082 unsigned long longs;
6083 unsigned long *bmask;
6084 struct lpfc_rsrc_blks *rsrc_blks;
6085 LPFC_MBOXQ_t *mbox;
6086 uint32_t length;
6087 struct lpfc_id_range *id_array = NULL;
6088 void *virtaddr = NULL;
6089 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6090 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6091 struct list_head *ext_blk_list;
6092
6093 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6094 &rsrc_cnt,
6095 &rsrc_size);
6096 if (unlikely(rc))
6097 return -EIO;
6098
6099 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07006100 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6d368e52011-05-24 11:44:12 -04006101 "3009 No available Resource Extents "
6102 "for resource type 0x%x: Count: 0x%x, "
6103 "Size 0x%x\n", type, rsrc_cnt,
6104 rsrc_size);
6105 return -ENOMEM;
6106 }
6107
James Smart8a9d2e82012-05-09 21:16:12 -04006108 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6109 "2903 Post resource extents type-0x%x: "
6110 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
James Smart6d368e52011-05-24 11:44:12 -04006111
6112 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6113 if (!mbox)
6114 return -ENOMEM;
6115
James Smart8a9d2e82012-05-09 21:16:12 -04006116 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
James Smart6d368e52011-05-24 11:44:12 -04006117 if (unlikely(rc)) {
6118 rc = -EIO;
6119 goto err_exit;
6120 }
6121
6122 /*
6123 * Figure out where the response is located. Then get local pointers
6124 * to the response data. The port does not guarantee to respond to
6125 * all extents counts request so update the local variable with the
6126 * allocated count from the port.
6127 */
6128 if (emb == LPFC_SLI4_MBX_EMBED) {
6129 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6130 id_array = &rsrc_ext->u.rsp.id[0];
6131 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6132 } else {
6133 virtaddr = mbox->sge_array->addr[0];
6134 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6135 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6136 id_array = &n_rsrc->id;
6137 }
6138
6139 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6140 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6141
6142 /*
6143 * Based on the resource size and count, correct the base and max
6144 * resource values.
6145 */
6146 length = sizeof(struct lpfc_rsrc_blks);
6147 switch (type) {
6148 case LPFC_RSC_TYPE_FCOE_RPI:
Kees Cook6396bb22018-06-12 14:03:40 -07006149 phba->sli4_hba.rpi_bmask = kcalloc(longs,
James Smart6d368e52011-05-24 11:44:12 -04006150 sizeof(unsigned long),
6151 GFP_KERNEL);
6152 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6153 rc = -ENOMEM;
6154 goto err_exit;
6155 }
Kees Cook6396bb22018-06-12 14:03:40 -07006156 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
James Smart6d368e52011-05-24 11:44:12 -04006157 sizeof(uint16_t),
6158 GFP_KERNEL);
6159 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6160 kfree(phba->sli4_hba.rpi_bmask);
6161 rc = -ENOMEM;
6162 goto err_exit;
6163 }
6164
6165 /*
6166 * The next_rpi was initialized with the maximum available
6167 * count but the port may allocate a smaller number. Catch
6168 * that case and update the next_rpi.
6169 */
6170 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6171
6172 /* Initialize local ptrs for common extent processing later. */
6173 bmask = phba->sli4_hba.rpi_bmask;
6174 ids = phba->sli4_hba.rpi_ids;
6175 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6176 break;
6177 case LPFC_RSC_TYPE_FCOE_VPI:
Kees Cook6396bb22018-06-12 14:03:40 -07006178 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
James Smart6d368e52011-05-24 11:44:12 -04006179 GFP_KERNEL);
6180 if (unlikely(!phba->vpi_bmask)) {
6181 rc = -ENOMEM;
6182 goto err_exit;
6183 }
Kees Cook6396bb22018-06-12 14:03:40 -07006184 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
James Smart6d368e52011-05-24 11:44:12 -04006185 GFP_KERNEL);
6186 if (unlikely(!phba->vpi_ids)) {
6187 kfree(phba->vpi_bmask);
6188 rc = -ENOMEM;
6189 goto err_exit;
6190 }
6191
6192 /* Initialize local ptrs for common extent processing later. */
6193 bmask = phba->vpi_bmask;
6194 ids = phba->vpi_ids;
6195 ext_blk_list = &phba->lpfc_vpi_blk_list;
6196 break;
6197 case LPFC_RSC_TYPE_FCOE_XRI:
Kees Cook6396bb22018-06-12 14:03:40 -07006198 phba->sli4_hba.xri_bmask = kcalloc(longs,
James Smart6d368e52011-05-24 11:44:12 -04006199 sizeof(unsigned long),
6200 GFP_KERNEL);
6201 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6202 rc = -ENOMEM;
6203 goto err_exit;
6204 }
James Smart8a9d2e82012-05-09 21:16:12 -04006205 phba->sli4_hba.max_cfg_param.xri_used = 0;
Kees Cook6396bb22018-06-12 14:03:40 -07006206 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
James Smart6d368e52011-05-24 11:44:12 -04006207 sizeof(uint16_t),
6208 GFP_KERNEL);
6209 if (unlikely(!phba->sli4_hba.xri_ids)) {
6210 kfree(phba->sli4_hba.xri_bmask);
6211 rc = -ENOMEM;
6212 goto err_exit;
6213 }
6214
6215 /* Initialize local ptrs for common extent processing later. */
6216 bmask = phba->sli4_hba.xri_bmask;
6217 ids = phba->sli4_hba.xri_ids;
6218 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6219 break;
6220 case LPFC_RSC_TYPE_FCOE_VFI:
Kees Cook6396bb22018-06-12 14:03:40 -07006221 phba->sli4_hba.vfi_bmask = kcalloc(longs,
James Smart6d368e52011-05-24 11:44:12 -04006222 sizeof(unsigned long),
6223 GFP_KERNEL);
6224 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6225 rc = -ENOMEM;
6226 goto err_exit;
6227 }
Kees Cook6396bb22018-06-12 14:03:40 -07006228 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
James Smart6d368e52011-05-24 11:44:12 -04006229 sizeof(uint16_t),
6230 GFP_KERNEL);
6231 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6232 kfree(phba->sli4_hba.vfi_bmask);
6233 rc = -ENOMEM;
6234 goto err_exit;
6235 }
6236
6237 /* Initialize local ptrs for common extent processing later. */
6238 bmask = phba->sli4_hba.vfi_bmask;
6239 ids = phba->sli4_hba.vfi_ids;
6240 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6241 break;
6242 default:
6243 /* Unsupported Opcode. Fail call. */
6244 id_array = NULL;
6245 bmask = NULL;
6246 ids = NULL;
6247 ext_blk_list = NULL;
6248 goto err_exit;
6249 }
6250
6251 /*
6252 * Complete initializing the extent configuration with the
6253 * allocated ids assigned to this function. The bitmask serves
6254 * as an index into the array and manages the available ids. The
6255 * array just stores the ids communicated to the port via the wqes.
6256 */
6257 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6258 if ((i % 2) == 0)
6259 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6260 &id_array[k]);
6261 else
6262 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6263 &id_array[k]);
6264
6265 rsrc_blks = kzalloc(length, GFP_KERNEL);
6266 if (unlikely(!rsrc_blks)) {
6267 rc = -ENOMEM;
6268 kfree(bmask);
6269 kfree(ids);
6270 goto err_exit;
6271 }
6272 rsrc_blks->rsrc_start = rsrc_id;
6273 rsrc_blks->rsrc_size = rsrc_size;
6274 list_add_tail(&rsrc_blks->list, ext_blk_list);
6275 rsrc_start = rsrc_id;
James Smart895427b2017-02-12 13:52:30 -08006276 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
James Smart5e5b5112019-01-28 11:14:22 -08006277 phba->sli4_hba.io_xri_start = rsrc_start +
James Smart895427b2017-02-12 13:52:30 -08006278 lpfc_sli4_get_iocb_cnt(phba);
James Smart895427b2017-02-12 13:52:30 -08006279 }
James Smart6d368e52011-05-24 11:44:12 -04006280
6281 while (rsrc_id < (rsrc_start + rsrc_size)) {
6282 ids[j] = rsrc_id;
6283 rsrc_id++;
6284 j++;
6285 }
6286 /* Entire word processed. Get next word.*/
6287 if ((i % 2) == 1)
6288 k++;
6289 }
6290 err_exit:
6291 lpfc_sli4_mbox_cmd_free(phba, mbox);
6292 return rc;
6293}
6294
James Smart895427b2017-02-12 13:52:30 -08006295
6296
/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type (LPFC_RSC_TYPE_FCOE_VPI/XRI/VFI/RPI).
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 *
 * Returns 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -EIO if the mailbox command fails or the port reports an error status.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends the
	 * resource type. All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	/* Poll when interrupts are not yet enabled (early init/teardown). */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/* The port reports failure through the config-header status word. */
	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	/* Clear the global "extent resources ready" flag. */
	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
6414
Baoyou Xiebd4b3e52016-09-25 13:44:55 +08006415static void
James Smart7bdedb32016-07-06 12:36:00 -07006416lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6417 uint32_t feature)
James Smart65791f12016-07-06 12:35:56 -07006418{
James Smart65791f12016-07-06 12:35:56 -07006419 uint32_t len;
James Smart65791f12016-07-06 12:35:56 -07006420
James Smart65791f12016-07-06 12:35:56 -07006421 len = sizeof(struct lpfc_mbx_set_feature) -
6422 sizeof(struct lpfc_sli4_cfg_mhdr);
6423 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6424 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6425 LPFC_SLI4_MBX_EMBED);
James Smart65791f12016-07-06 12:35:56 -07006426
James Smart7bdedb32016-07-06 12:36:00 -07006427 switch (feature) {
6428 case LPFC_SET_UE_RECOVERY:
6429 bf_set(lpfc_mbx_set_feature_UER,
6430 &mbox->u.mqe.un.set_feature, 1);
6431 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6432 mbox->u.mqe.un.set_feature.param_len = 8;
6433 break;
6434 case LPFC_SET_MDS_DIAGS:
6435 bf_set(lpfc_mbx_set_feature_mds,
6436 &mbox->u.mqe.un.set_feature, 1);
6437 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
James Smartae9e28f2017-05-15 15:20:51 -07006438 &mbox->u.mqe.un.set_feature, 1);
James Smart7bdedb32016-07-06 12:36:00 -07006439 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6440 mbox->u.mqe.un.set_feature.param_len = 8;
6441 break;
James Smart171f6c42019-11-04 16:57:07 -08006442 case LPFC_SET_DUAL_DUMP:
6443 bf_set(lpfc_mbx_set_feature_dd,
6444 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6445 bf_set(lpfc_mbx_set_feature_ddquery,
6446 &mbox->u.mqe.un.set_feature, 0);
6447 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6448 mbox->u.mqe.un.set_feature.param_len = 4;
6449 break;
James Smart65791f12016-07-06 12:35:56 -07006450 }
James Smart7bdedb32016-07-06 12:36:00 -07006451
6452 return;
James Smart65791f12016-07-06 12:35:56 -07006453}
6454
/**
 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter. To
 * be done before reading logs from the host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	/* Mark logging inactive first; fwlog state is guarded by hbalock. */
	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);

	/* Wait 10ms for firmware to stop using DMA buffer */
	usleep_range(10 * 1000, 20 * 1000);
}
6478
6479/**
James Smartd2cc9bc2018-09-10 10:30:50 -07006480 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6481 * @phba: Pointer to HBA context object.
6482 *
6483 * This function is called to free memory allocated for RAS FW logging
6484 * support in the driver.
6485 **/
6486void
6487lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6488{
6489 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6490 struct lpfc_dmabuf *dmabuf, *next;
6491
6492 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6493 list_for_each_entry_safe(dmabuf, next,
6494 &ras_fwlog->fwlog_buff_list,
6495 list) {
6496 list_del(&dmabuf->list);
6497 dma_free_coherent(&phba->pcidev->dev,
6498 LPFC_RAS_MAX_ENTRY_SIZE,
6499 dmabuf->virt, dmabuf->phys);
6500 kfree(dmabuf);
6501 }
6502 }
6503
6504 if (ras_fwlog->lwpd.virt) {
6505 dma_free_coherent(&phba->pcidev->dev,
6506 sizeof(uint32_t) * 2,
6507 ras_fwlog->lwpd.virt,
6508 ras_fwlog->lwpd.phys);
6509 ras_fwlog->lwpd.virt = NULL;
6510 }
6511
James Smart95bfc6d2019-10-18 14:18:27 -07006512 spin_lock_irq(&phba->hbalock);
6513 ras_fwlog->state = INACTIVE;
6514 spin_unlock_irq(&phba->hbalock);
James Smartd2cc9bc2018-09-10 10:30:50 -07006515}
6516
6517/**
6518 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6519 * @phba: Pointer to HBA context object.
6520 * @fwlog_buff_count: Count of buffers to be created.
6521 *
6522 * This routine DMA memory for Log Write Position Data[LPWD] and buffer
6523 * to update FW log is posted to the adapter.
6524 * Buffer count is calculated based on module param ras_fwlog_buffsize
6525 * Size of each buffer posted to FW is 64K.
6526 **/
6527
6528static int
6529lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6530 uint32_t fwlog_buff_count)
6531{
6532 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6533 struct lpfc_dmabuf *dmabuf;
6534 int rc = 0, i = 0;
6535
6536 /* Initialize List */
6537 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6538
6539 /* Allocate memory for the LWPD */
6540 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6541 sizeof(uint32_t) * 2,
6542 &ras_fwlog->lwpd.phys,
6543 GFP_KERNEL);
6544 if (!ras_fwlog->lwpd.virt) {
Dick Kennedy372c1872020-06-30 14:50:00 -07006545 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartd2cc9bc2018-09-10 10:30:50 -07006546 "6185 LWPD Memory Alloc Failed\n");
6547
6548 return -ENOMEM;
6549 }
6550
6551 ras_fwlog->fw_buffcount = fwlog_buff_count;
6552 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6553 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6554 GFP_KERNEL);
6555 if (!dmabuf) {
6556 rc = -ENOMEM;
6557 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6558 "6186 Memory Alloc failed FW logging");
6559 goto free_mem;
6560 }
6561
Luis Chamberlain750afb02019-01-04 09:23:09 +01006562 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
James Smartd2cc9bc2018-09-10 10:30:50 -07006563 LPFC_RAS_MAX_ENTRY_SIZE,
Luis Chamberlain750afb02019-01-04 09:23:09 +01006564 &dmabuf->phys, GFP_KERNEL);
James Smartd2cc9bc2018-09-10 10:30:50 -07006565 if (!dmabuf->virt) {
6566 kfree(dmabuf);
6567 rc = -ENOMEM;
6568 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6569 "6187 DMA Alloc Failed FW logging");
6570 goto free_mem;
6571 }
James Smartd2cc9bc2018-09-10 10:30:50 -07006572 dmabuf->buffer_tag = i;
6573 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6574 }
6575
6576free_mem:
6577 if (rc)
6578 lpfc_sli4_ras_dma_free(phba);
6579
6580 return rc;
6581}
6582
/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for driver's RAS MBX command to the device.
 * On success the fwlog state is set to ACTIVE. On a mailbox or SLI4
 * config-header error, ras_hwsupport is cleared and the RAS DMA
 * buffers are released. The mailbox is freed on every path.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	mb = &pmb->u.mb;

	/* Extract the SLI4 config header from the SET_DIAG_LOG_OPTION reply. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6188 FW LOG mailbox "
				"completed with status x%x add_status x%x,"
				" mbx status x%x\n",
				shdr_status, shdr_add_status, mb->mbxStatus);

		/* Firmware rejected the request - treat RAS as unsupported. */
		ras_fwlog->ras_hwsupport = false;
		goto disable_ras;
	}

	/* Logging is live from here on; fwlog state is guarded by hbalock. */
	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;

disable_ras:
	/* Free RAS DMA memory */
	lpfc_sli4_ras_dma_free(phba);
	mempool_free(pmb, phba->mbox_mem_pool);
}
6628
/**
 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory. The command completes asynchronously via
 * lpfc_sli4_ras_mbox_cmpl(), which moves the fwlog state from
 * REG_INPROGRESS to ACTIVE.
 *
 * Returns 0 if the mailbox was posted, or a negative errno; on failure
 * the RAS DMA buffers are released.
 **/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
			 uint32_t fwlog_level,
			 uint32_t fwlog_enable)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
	int rc = 0;

	/* Start from a known state; fwlog state is guarded by hbalock. */
	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Total log size is a module-param multiple of the minimum post
	 * size; each posted buffer is LPFC_RAS_MAX_ENTRY_SIZE (64K).
	 */
	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
			  phba->cfg_ras_fwlog_buffsize);
	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);

	/*
	 * If re-enabling FW logging support use earlier allocated
	 * DMA buffers while posting MBX command.
	 **/
	if (!ras_fwlog->lwpd.virt) {
		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed");
			return rc;
		}
	}

	/* Setup Mailbox command */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6190 RAS MBX Alloc Failed");
		rc = -ENOMEM;
		/* NOTE: mem_free also releases reused buffers from a prior
		 * enable, so a later re-init must reallocate them. */
		goto mem_free;
	}

	ras_fwlog->fw_loglevel = fwlog_level;
	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
	       sizeof(struct lpfc_sli4_cfg_mhdr));

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
			 len, LPFC_SLI4_MBX_EMBED);

	/* Fill in enable flag, verbosity, buffer count and buffer size. */
	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
	       fwlog_enable);
	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
	       ras_fwlog->fw_loglevel);
	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
	       ras_fwlog->fw_buffcount);
	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);

	/* Update DMA buffer address */
	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}

	/* Update LPWD address */
	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

	/* Mark registration in flight before posting the async mailbox. */
	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = REG_INPROGRESS;
	spin_unlock_irq(&phba->hbalock);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus : x%x", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	} else
		rc = 0;
mem_free:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}
6737
6738/**
6739 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6740 * @phba: Pointer to HBA context object.
6741 *
6742 * Check if RAS is supported on the adapter and initialize it.
6743 **/
6744void
6745lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6746{
6747 /* Check RAS FW Log needs to be enabled or not */
6748 if (lpfc_check_fwlog_support(phba))
6749 return;
6750
6751 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6752 LPFC_RAS_ENABLE_LOGGING);
6753}
6754
/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers (RPI, VPI, XRI,
 * VFI). When the port supports resource extents the counts are read from
 * the port and extent blocks are allocated; otherwise the bitmasks and
 * id arrays are provisioned directly from the READ_CONFIG limits.
 *
 * Returns 0 on success, a negative errno on failure. On the non-extent
 * path, partial allocations are unwound through the free_* label chain.
 **/
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	/* Without RPI headers the port can use every RPI up to the max. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets. Detect this condition and reallocate
			 * resources. Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				/* Dealloc return codes are intentionally
				 * ignored; the subsequent alloc calls below
				 * report any real failure.
				 */
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents. The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays. If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready. An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

	/* Unwind chain: each label frees one allocation and NULLs the
	 * pointer, then falls through to the previous stage's cleanup. */
 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
7000
/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function releases the resource identifiers set up by
 * lpfc_sli4_alloc_resource_identifiers(). When the port uses resource
 * extents, each extent type is returned to the port via
 * lpfc_sli4_dealloc_extent(); otherwise the locally provisioned bitmasks
 * and id arrays are freed and the ready flags are cleared.
 *
 * Always returns 0.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}
7031
7032/**
James Smartb76f2dc2011-07-22 18:37:42 -04007033 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7034 * @phba: Pointer to HBA context object.
7035 * @type: The resource extent type.
Lee Jones7af29d42020-07-21 17:41:31 +01007036 * @extnt_cnt: buffer to hold port extent count response
James Smartb76f2dc2011-07-22 18:37:42 -04007037 * @extnt_size: buffer to hold port extent size response.
7038 *
7039 * This function calls the port to read the host allocated extents
7040 * for a particular type.
7041 **/
7042int
7043lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7044 uint16_t *extnt_cnt, uint16_t *extnt_size)
7045{
7046 bool emb;
7047 int rc = 0;
7048 uint16_t curr_blks = 0;
7049 uint32_t req_len, emb_len;
7050 uint32_t alloc_len, mbox_tmo;
7051 struct list_head *blk_list_head;
7052 struct lpfc_rsrc_blks *rsrc_blk;
7053 LPFC_MBOXQ_t *mbox;
7054 void *virtaddr = NULL;
7055 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7056 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7057 union lpfc_sli4_cfg_shdr *shdr;
7058
7059 switch (type) {
7060 case LPFC_RSC_TYPE_FCOE_VPI:
7061 blk_list_head = &phba->lpfc_vpi_blk_list;
7062 break;
7063 case LPFC_RSC_TYPE_FCOE_XRI:
7064 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7065 break;
7066 case LPFC_RSC_TYPE_FCOE_VFI:
7067 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7068 break;
7069 case LPFC_RSC_TYPE_FCOE_RPI:
7070 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7071 break;
7072 default:
7073 return -EIO;
7074 }
7075
7076 /* Count the number of extents currently allocatd for this type. */
7077 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7078 if (curr_blks == 0) {
7079 /*
7080 * The GET_ALLOCATED mailbox does not return the size,
7081 * just the count. The size should be just the size
7082 * stored in the current allocated block and all sizes
7083 * for an extent type are the same so set the return
7084 * value now.
7085 */
7086 *extnt_size = rsrc_blk->rsrc_size;
7087 }
7088 curr_blks++;
7089 }
7090
James Smartb76f2dc2011-07-22 18:37:42 -04007091 /*
7092 * Calculate the size of an embedded mailbox. The uint32_t
7093 * accounts for extents-specific word.
7094 */
7095 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7096 sizeof(uint32_t);
7097
7098 /*
7099 * Presume the allocation and response will fit into an embedded
7100 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7101 */
7102 emb = LPFC_SLI4_MBX_EMBED;
7103 req_len = emb_len;
7104 if (req_len > emb_len) {
7105 req_len = curr_blks * sizeof(uint16_t) +
7106 sizeof(union lpfc_sli4_cfg_shdr) +
7107 sizeof(uint32_t);
7108 emb = LPFC_SLI4_MBX_NEMBED;
7109 }
7110
7111 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7112 if (!mbox)
7113 return -ENOMEM;
7114 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7115
7116 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7117 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7118 req_len, emb);
7119 if (alloc_len < req_len) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartb76f2dc2011-07-22 18:37:42 -04007121 "2983 Allocated DMA memory size (x%x) is "
7122 "less than the requested DMA memory "
7123 "size (x%x)\n", alloc_len, req_len);
7124 rc = -ENOMEM;
7125 goto err_exit;
7126 }
7127 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7128 if (unlikely(rc)) {
7129 rc = -EIO;
7130 goto err_exit;
7131 }
7132
7133 if (!phba->sli4_hba.intr_enable)
7134 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7135 else {
James Smarta183a152011-10-10 21:32:43 -04007136 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smartb76f2dc2011-07-22 18:37:42 -04007137 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7138 }
7139
7140 if (unlikely(rc)) {
7141 rc = -EIO;
7142 goto err_exit;
7143 }
7144
7145 /*
7146 * Figure out where the response is located. Then get local pointers
7147 * to the response data. The port does not guarantee to respond to
7148 * all extents counts request so update the local variable with the
7149 * allocated count from the port.
7150 */
7151 if (emb == LPFC_SLI4_MBX_EMBED) {
7152 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7153 shdr = &rsrc_ext->header.cfg_shdr;
7154 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7155 } else {
7156 virtaddr = mbox->sge_array->addr[0];
7157 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7158 shdr = &n_rsrc->cfg_shdr;
7159 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7160 }
7161
7162 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007163 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartb76f2dc2011-07-22 18:37:42 -04007164 "2984 Failed to read allocated resources "
7165 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7166 type,
7167 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7168 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7169 rc = -EIO;
7170 goto err_exit;
7171 }
7172 err_exit:
7173 lpfc_sli4_mbox_cmd_free(phba, mbox);
7174 return rc;
7175}
7176
7177/**
James Smart0ef69962017-04-21 16:04:50 -07007178 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
James Smart8a9d2e82012-05-09 21:16:12 -04007179 * @phba: pointer to lpfc hba data structure.
James Smart895427b2017-02-12 13:52:30 -08007180 * @sgl_list: linked link of sgl buffers to post
7181 * @cnt: number of linked list buffers
James Smart8a9d2e82012-05-09 21:16:12 -04007182 *
James Smart895427b2017-02-12 13:52:30 -08007183 * This routine walks the list of buffers that have been allocated and
James Smart8a9d2e82012-05-09 21:16:12 -04007184 * repost them to the port by using SGL block post. This is needed after a
7185 * pci_function_reset/warm_start or start. It attempts to construct blocks
James Smart895427b2017-02-12 13:52:30 -08007186 * of buffer sgls which contains contiguous xris and uses the non-embedded
7187 * SGL block post mailbox commands to post them to the port. For single
James Smart8a9d2e82012-05-09 21:16:12 -04007188 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
7189 * mailbox command for posting.
7190 *
7191 * Returns: 0 = success, non-zero failure.
7192 **/
7193static int
James Smart895427b2017-02-12 13:52:30 -08007194lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7195 struct list_head *sgl_list, int cnt)
James Smart8a9d2e82012-05-09 21:16:12 -04007196{
7197 struct lpfc_sglq *sglq_entry = NULL;
7198 struct lpfc_sglq *sglq_entry_next = NULL;
7199 struct lpfc_sglq *sglq_entry_first = NULL;
James Smart895427b2017-02-12 13:52:30 -08007200 int status, total_cnt;
7201 int post_cnt = 0, num_posted = 0, block_cnt = 0;
James Smart8a9d2e82012-05-09 21:16:12 -04007202 int last_xritag = NO_XRI;
7203 LIST_HEAD(prep_sgl_list);
7204 LIST_HEAD(blck_sgl_list);
7205 LIST_HEAD(allc_sgl_list);
7206 LIST_HEAD(post_sgl_list);
7207 LIST_HEAD(free_sgl_list);
7208
James Smart38c20672013-03-01 16:37:44 -05007209 spin_lock_irq(&phba->hbalock);
James Smart895427b2017-02-12 13:52:30 -08007210 spin_lock(&phba->sli4_hba.sgl_list_lock);
7211 list_splice_init(sgl_list, &allc_sgl_list);
7212 spin_unlock(&phba->sli4_hba.sgl_list_lock);
James Smart38c20672013-03-01 16:37:44 -05007213 spin_unlock_irq(&phba->hbalock);
James Smart8a9d2e82012-05-09 21:16:12 -04007214
James Smart895427b2017-02-12 13:52:30 -08007215 total_cnt = cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04007216 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7217 &allc_sgl_list, list) {
7218 list_del_init(&sglq_entry->list);
7219 block_cnt++;
7220 if ((last_xritag != NO_XRI) &&
7221 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7222 /* a hole in xri block, form a sgl posting block */
7223 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7224 post_cnt = block_cnt - 1;
7225 /* prepare list for next posting block */
7226 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7227 block_cnt = 1;
7228 } else {
7229 /* prepare list for next posting block */
7230 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7231 /* enough sgls for non-embed sgl mbox command */
7232 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7233 list_splice_init(&prep_sgl_list,
7234 &blck_sgl_list);
7235 post_cnt = block_cnt;
7236 block_cnt = 0;
7237 }
7238 }
7239 num_posted++;
7240
7241 /* keep track of last sgl's xritag */
7242 last_xritag = sglq_entry->sli4_xritag;
7243
James Smart895427b2017-02-12 13:52:30 -08007244 /* end of repost sgl list condition for buffers */
7245 if (num_posted == total_cnt) {
James Smart8a9d2e82012-05-09 21:16:12 -04007246 if (post_cnt == 0) {
7247 list_splice_init(&prep_sgl_list,
7248 &blck_sgl_list);
7249 post_cnt = block_cnt;
7250 } else if (block_cnt == 1) {
7251 status = lpfc_sli4_post_sgl(phba,
7252 sglq_entry->phys, 0,
7253 sglq_entry->sli4_xritag);
7254 if (!status) {
7255 /* successful, put sgl to posted list */
7256 list_add_tail(&sglq_entry->list,
7257 &post_sgl_list);
7258 } else {
7259 /* Failure, put sgl to free list */
7260 lpfc_printf_log(phba, KERN_WARNING,
7261 LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08007262 "3159 Failed to post "
James Smart8a9d2e82012-05-09 21:16:12 -04007263 "sgl, xritag:x%x\n",
7264 sglq_entry->sli4_xritag);
7265 list_add_tail(&sglq_entry->list,
7266 &free_sgl_list);
James Smart711ea882013-04-17 20:18:29 -04007267 total_cnt--;
James Smart8a9d2e82012-05-09 21:16:12 -04007268 }
7269 }
7270 }
7271
7272 /* continue until a nembed page worth of sgls */
7273 if (post_cnt == 0)
7274 continue;
7275
James Smart895427b2017-02-12 13:52:30 -08007276 /* post the buffer list sgls as a block */
7277 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7278 post_cnt);
James Smart8a9d2e82012-05-09 21:16:12 -04007279
7280 if (!status) {
7281 /* success, put sgl list to posted sgl list */
7282 list_splice_init(&blck_sgl_list, &post_sgl_list);
7283 } else {
7284 /* Failure, put sgl list to free sgl list */
7285 sglq_entry_first = list_first_entry(&blck_sgl_list,
7286 struct lpfc_sglq,
7287 list);
7288 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08007289 "3160 Failed to post sgl-list, "
James Smart8a9d2e82012-05-09 21:16:12 -04007290 "xritag:x%x-x%x\n",
7291 sglq_entry_first->sli4_xritag,
7292 (sglq_entry_first->sli4_xritag +
7293 post_cnt - 1));
7294 list_splice_init(&blck_sgl_list, &free_sgl_list);
James Smart711ea882013-04-17 20:18:29 -04007295 total_cnt -= post_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04007296 }
7297
7298 /* don't reset xirtag due to hole in xri block */
7299 if (block_cnt == 0)
7300 last_xritag = NO_XRI;
7301
James Smart895427b2017-02-12 13:52:30 -08007302 /* reset sgl post count for next round of posting */
James Smart8a9d2e82012-05-09 21:16:12 -04007303 post_cnt = 0;
7304 }
7305
James Smart895427b2017-02-12 13:52:30 -08007306 /* free the sgls failed to post */
James Smart8a9d2e82012-05-09 21:16:12 -04007307 lpfc_free_sgl_list(phba, &free_sgl_list);
7308
James Smart895427b2017-02-12 13:52:30 -08007309 /* push sgls posted to the available list */
James Smart8a9d2e82012-05-09 21:16:12 -04007310 if (!list_empty(&post_sgl_list)) {
James Smart38c20672013-03-01 16:37:44 -05007311 spin_lock_irq(&phba->hbalock);
James Smart895427b2017-02-12 13:52:30 -08007312 spin_lock(&phba->sli4_hba.sgl_list_lock);
7313 list_splice_init(&post_sgl_list, sgl_list);
7314 spin_unlock(&phba->sli4_hba.sgl_list_lock);
James Smart38c20672013-03-01 16:37:44 -05007315 spin_unlock_irq(&phba->hbalock);
James Smart8a9d2e82012-05-09 21:16:12 -04007316 } else {
Dick Kennedy372c1872020-06-30 14:50:00 -07007317 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08007318 "3161 Failure to post sgl to port.\n");
James Smart8a9d2e82012-05-09 21:16:12 -04007319 return -EIO;
7320 }
James Smart895427b2017-02-12 13:52:30 -08007321
7322 /* return the number of XRIs actually posted */
7323 return total_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04007324}
7325
James Smart0794d602019-01-28 11:14:19 -08007326/**
James Smart5e5b5112019-01-28 11:14:22 -08007327 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
James Smart0794d602019-01-28 11:14:19 -08007328 * @phba: pointer to lpfc hba data structure.
7329 *
7330 * This routine walks the list of nvme buffers that have been allocated and
7331 * repost them to the port by using SGL block post. This is needed after a
7332 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7333 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
James Smart5e5b5112019-01-28 11:14:22 -08007334 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
James Smart0794d602019-01-28 11:14:19 -08007335 *
7336 * Returns: 0 = success, non-zero failure.
7337 **/
Bart Van Assche3999df72019-03-28 11:06:16 -07007338static int
James Smart5e5b5112019-01-28 11:14:22 -08007339lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
James Smart0794d602019-01-28 11:14:19 -08007340{
7341 LIST_HEAD(post_nblist);
7342 int num_posted, rc = 0;
7343
7344 /* get all NVME buffers need to repost to a local list */
James Smart5e5b5112019-01-28 11:14:22 -08007345 lpfc_io_buf_flush(phba, &post_nblist);
James Smart0794d602019-01-28 11:14:19 -08007346
7347 /* post the list of nvme buffer sgls to port if available */
7348 if (!list_empty(&post_nblist)) {
James Smart5e5b5112019-01-28 11:14:22 -08007349 num_posted = lpfc_sli4_post_io_sgl_list(
7350 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
James Smart0794d602019-01-28 11:14:19 -08007351 /* failed to post any nvme buffer, return error */
7352 if (num_posted == 0)
7353 rc = -EIO;
7354 }
7355 return rc;
7356}
7357
Bart Van Assche3999df72019-03-28 11:06:16 -07007358static void
James Smart61bda8f2016-10-13 15:06:05 -07007359lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7360{
7361 uint32_t len;
7362
7363 len = sizeof(struct lpfc_mbx_set_host_data) -
7364 sizeof(struct lpfc_sli4_cfg_mhdr);
7365 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7366 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7367 LPFC_SLI4_MBX_EMBED);
7368
7369 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
James Smartb2fd1032016-12-19 15:07:21 -08007370 mbox->u.mqe.un.set_host_data.param_len =
7371 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
James Smart61bda8f2016-10-13 15:06:05 -07007372 snprintf(mbox->u.mqe.un.set_host_data.data,
7373 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7374 "Linux %s v"LPFC_DRIVER_VERSION,
7375 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7376}
7377
James Smarta8cf5df2017-05-15 15:20:46 -07007378int
James Smart6c621a22017-05-15 15:20:45 -07007379lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
James Smarta8cf5df2017-05-15 15:20:46 -07007380 struct lpfc_queue *drq, int count, int idx)
James Smart6c621a22017-05-15 15:20:45 -07007381{
7382 int rc, i;
7383 struct lpfc_rqe hrqe;
7384 struct lpfc_rqe drqe;
7385 struct lpfc_rqb *rqbp;
James Smart411de512018-01-30 15:58:52 -08007386 unsigned long flags;
James Smart6c621a22017-05-15 15:20:45 -07007387 struct rqb_dmabuf *rqb_buffer;
7388 LIST_HEAD(rqb_buf_list);
7389
7390 rqbp = hrq->rqbp;
7391 for (i = 0; i < count; i++) {
James Smart62e3a931d2020-10-20 13:27:11 -07007392 spin_lock_irqsave(&phba->hbalock, flags);
James Smart6c621a22017-05-15 15:20:45 -07007393 /* IF RQ is already full, don't bother */
James Smart62e3a931d2020-10-20 13:27:11 -07007394 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7395 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smart6c621a22017-05-15 15:20:45 -07007396 break;
James Smart62e3a931d2020-10-20 13:27:11 -07007397 }
7398 spin_unlock_irqrestore(&phba->hbalock, flags);
7399
James Smart6c621a22017-05-15 15:20:45 -07007400 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7401 if (!rqb_buffer)
7402 break;
7403 rqb_buffer->hrq = hrq;
7404 rqb_buffer->drq = drq;
James Smarta8cf5df2017-05-15 15:20:46 -07007405 rqb_buffer->idx = idx;
James Smart6c621a22017-05-15 15:20:45 -07007406 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7407 }
James Smart62e3a931d2020-10-20 13:27:11 -07007408
7409 spin_lock_irqsave(&phba->hbalock, flags);
James Smart6c621a22017-05-15 15:20:45 -07007410 while (!list_empty(&rqb_buf_list)) {
7411 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7412 hbuf.list);
7413
7414 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7415 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7416 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7417 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7418 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7419 if (rc < 0) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart411de512018-01-30 15:58:52 -08007421 "6421 Cannot post to HRQ %d: %x %x %x "
7422 "DRQ %x %x\n",
7423 hrq->queue_id,
7424 hrq->host_index,
7425 hrq->hba_index,
7426 hrq->entry_count,
7427 drq->host_index,
7428 drq->hba_index);
James Smart6c621a22017-05-15 15:20:45 -07007429 rqbp->rqb_free_buffer(phba, rqb_buffer);
7430 } else {
7431 list_add_tail(&rqb_buffer->hbuf.list,
7432 &rqbp->rqb_buffer_list);
7433 rqbp->buffer_count++;
7434 }
7435 }
James Smart411de512018-01-30 15:58:52 -08007436 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smart6c621a22017-05-15 15:20:45 -07007437 return 1;
7438}
7439
Dick Kennedy317aeb82020-06-30 14:49:59 -07007440/**
7441 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
Lee Jones7af29d42020-07-21 17:41:31 +01007442 * @phba: pointer to lpfc hba data structure.
Dick Kennedy317aeb82020-06-30 14:49:59 -07007443 *
7444 * This routine initializes the per-cq idle_stat to dynamically dictate
7445 * polling decisions.
7446 *
7447 * Return codes:
7448 * None
7449 **/
7450static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7451{
7452 int i;
7453 struct lpfc_sli4_hdw_queue *hdwq;
7454 struct lpfc_queue *cq;
7455 struct lpfc_idle_stat *idle_stat;
7456 u64 wall;
7457
7458 for_each_present_cpu(i) {
7459 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7460 cq = hdwq->io_cq;
7461
7462 /* Skip if we've already handled this cq's primary CPU */
7463 if (cq->chann != i)
7464 continue;
7465
7466 idle_stat = &phba->sli4_hba.idle_stat[i];
7467
7468 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7469 idle_stat->prev_wall = wall;
7470
7471 if (phba->nvmet_support)
7472 cq->poll_mode = LPFC_QUEUE_WORK;
7473 else
7474 cq->poll_mode = LPFC_IRQ_POLL;
7475 }
7476
7477 if (!phba->nvmet_support)
7478 schedule_delayed_work(&phba->idle_stat_delay_work,
7479 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7480}
7481
Dick Kennedyf0020e42020-06-30 14:49:58 -07007482static void lpfc_sli4_dip(struct lpfc_hba *phba)
7483{
7484 uint32_t if_type;
7485
7486 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7487 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7488 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7489 struct lpfc_register reg_data;
7490
7491 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7492 &reg_data.word0))
7493 return;
7494
7495 if (bf_get(lpfc_sliport_status_dip, &reg_data))
James Smart0b3ad322021-01-04 10:02:39 -08007496 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
Dick Kennedyf0020e42020-06-30 14:49:58 -07007497 "2904 Firmware Dump Image Present"
7498 " on Adapter");
7499 }
7500}
7501
James Smart8a9d2e82012-05-09 21:16:12 -04007502/**
Masahiro Yamada183b8022017-02-27 14:29:20 -08007503 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
James Smartda0436e2009-05-22 14:51:39 -04007504 * @phba: Pointer to HBA context object.
7505 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08007506 * This function is the main SLI4 device initialization PCI function. This
7507 * function is called by the HBA initialization code, HBA reset code and
James Smartda0436e2009-05-22 14:51:39 -04007508 * HBA error attention handler code. Caller is not required to hold any
7509 * locks.
7510 **/
7511int
7512lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7513{
James Smart171f6c42019-11-04 16:57:07 -08007514 int rc, i, cnt, len, dd;
James Smartda0436e2009-05-22 14:51:39 -04007515 LPFC_MBOXQ_t *mboxq;
7516 struct lpfc_mqe *mqe;
7517 uint8_t *vpd;
7518 uint32_t vpd_size;
7519 uint32_t ftr_rsp = 0;
7520 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7521 struct lpfc_vport *vport = phba->pport;
7522 struct lpfc_dmabuf *mp;
James Smart2d7dbc42017-02-12 13:52:35 -08007523 struct lpfc_rqb *rqbp;
James Smartda0436e2009-05-22 14:51:39 -04007524
7525 /* Perform a PCI function reset to start from clean */
7526 rc = lpfc_pci_function_reset(phba);
7527 if (unlikely(rc))
7528 return -ENODEV;
7529
	/* Check the HBA Host Status Register for readiness */
7531 rc = lpfc_sli4_post_status_check(phba);
7532 if (unlikely(rc))
7533 return -ENODEV;
7534 else {
7535 spin_lock_irq(&phba->hbalock);
7536 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7537 spin_unlock_irq(&phba->hbalock);
7538 }
7539
Dick Kennedyf0020e42020-06-30 14:49:58 -07007540 lpfc_sli4_dip(phba);
7541
James Smartda0436e2009-05-22 14:51:39 -04007542 /*
7543 * Allocate a single mailbox container for initializing the
7544 * port.
7545 */
7546 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7547 if (!mboxq)
7548 return -ENOMEM;
7549
James Smartda0436e2009-05-22 14:51:39 -04007550 /* Issue READ_REV to collect vpd and FW information. */
James Smart49198b32010-04-06 15:04:33 -04007551 vpd_size = SLI4_PAGE_SIZE;
James Smartda0436e2009-05-22 14:51:39 -04007552 vpd = kzalloc(vpd_size, GFP_KERNEL);
7553 if (!vpd) {
7554 rc = -ENOMEM;
7555 goto out_free_mbox;
7556 }
7557
7558 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
James Smart76a95d72010-11-20 23:11:48 -05007559 if (unlikely(rc)) {
7560 kfree(vpd);
7561 goto out_free_mbox;
7562 }
James Smart572709e2013-07-15 18:32:43 -04007563
James Smartda0436e2009-05-22 14:51:39 -04007564 mqe = &mboxq->u.mqe;
James Smartf1126682009-06-10 17:22:44 -04007565 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
James Smartb5c53952016-03-31 14:12:30 -07007566 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
James Smart76a95d72010-11-20 23:11:48 -05007567 phba->hba_flag |= HBA_FCOE_MODE;
James Smartb5c53952016-03-31 14:12:30 -07007568 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7569 } else {
James Smart76a95d72010-11-20 23:11:48 -05007570 phba->hba_flag &= ~HBA_FCOE_MODE;
James Smartb5c53952016-03-31 14:12:30 -07007571 }
James Smart45ed1192009-10-02 15:17:02 -04007572
7573 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7574 LPFC_DCBX_CEE_MODE)
7575 phba->hba_flag |= HBA_FIP_SUPPORT;
7576 else
7577 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7578
James Smartc00f62e2019-08-14 16:57:11 -07007579 phba->hba_flag &= ~HBA_IOQ_FLUSH;
James Smart4f2e66c2012-05-09 21:17:07 -04007580
James Smartc31098c2011-04-16 11:03:33 -04007581 if (phba->sli_rev != LPFC_SLI_REV4) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04007583 "0376 READ_REV Error. SLI Level %d "
7584 "FCoE enabled %d\n",
James Smart76a95d72010-11-20 23:11:48 -05007585 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
James Smartda0436e2009-05-22 14:51:39 -04007586 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05007587 kfree(vpd);
7588 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04007589 }
James Smartcd1c8302011-10-10 21:33:25 -04007590
7591 /*
James Smartff78d8f2011-12-13 13:21:35 -05007592 * Continue initialization with default values even if driver failed
7593 * to read FCoE param config regions, only read parameters if the
7594 * board is FCoE
7595 */
7596 if (phba->hba_flag & HBA_FCOE_MODE &&
7597 lpfc_sli4_read_fcoe_params(phba))
7598 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7599 "2570 Failed to read FCoE parameters\n");
7600
7601 /*
James Smartcd1c8302011-10-10 21:33:25 -04007602 * Retrieve sli4 device physical port name, failure of doing it
7603 * is considered as non-fatal.
7604 */
7605 rc = lpfc_sli4_retrieve_pport_name(phba);
7606 if (!rc)
7607 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7608 "3080 Successful retrieving SLI4 device "
7609 "physical port name: %s.\n", phba->Port);
7610
James Smartb3b4f3e2019-03-12 16:30:23 -07007611 rc = lpfc_sli4_get_ctl_attr(phba);
7612 if (!rc)
7613 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7614 "8351 Successful retrieving SLI4 device "
7615 "CTL ATTR\n");
7616
James Smartda0436e2009-05-22 14:51:39 -04007617 /*
7618 * Evaluate the read rev and vpd data. Populate the driver
7619 * state with the results. If this routine fails, the failure
7620 * is not fatal as the driver will use generic values.
7621 */
7622 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7623 if (unlikely(!rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007624 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04007625 "0377 Error %d parsing vpd. "
7626 "Using defaults.\n", rc);
7627 rc = 0;
7628 }
James Smart76a95d72010-11-20 23:11:48 -05007629 kfree(vpd);
James Smartda0436e2009-05-22 14:51:39 -04007630
James Smartf1126682009-06-10 17:22:44 -04007631 /* Save information as VPD data */
7632 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7633 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
James Smart4e565cf2018-02-22 08:18:50 -08007634
7635 /*
7636 * This is because first G7 ASIC doesn't support the standard
7637 * 0x5a NVME cmd descriptor type/subtype
7638 */
7639 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7640 LPFC_SLI_INTF_IF_TYPE_6) &&
7641 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7642 (phba->vpd.rev.smRev == 0) &&
7643 (phba->cfg_nvme_embed_cmd == 1))
7644 phba->cfg_nvme_embed_cmd = 0;
7645
James Smartf1126682009-06-10 17:22:44 -04007646 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7647 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7648 &mqe->un.read_rev);
7649 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7650 &mqe->un.read_rev);
7651 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7652 &mqe->un.read_rev);
7653 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7654 &mqe->un.read_rev);
7655 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7656 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7657 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7658 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7659 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7660 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7661 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7662 "(%d):0380 READ_REV Status x%x "
7663 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7664 mboxq->vport ? mboxq->vport->vpi : 0,
7665 bf_get(lpfc_mqe_status, mqe),
7666 phba->vpd.rev.opFwName,
7667 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7668 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
James Smartda0436e2009-05-22 14:51:39 -04007669
James Smart65791f12016-07-06 12:35:56 -07007670 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
James Smart7bdedb32016-07-06 12:36:00 -07007671 LPFC_SLI_INTF_IF_TYPE_0) {
7672 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7673 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7674 if (rc == MBX_SUCCESS) {
7675 phba->hba_flag |= HBA_RECOVERABLE_UE;
7676 /* Set 1Sec interval to detect UE */
7677 phba->eratt_poll_interval = 1;
7678 phba->sli4_hba.ue_to_sr = bf_get(
7679 lpfc_mbx_set_feature_UESR,
7680 &mboxq->u.mqe.un.set_feature);
7681 phba->sli4_hba.ue_to_rp = bf_get(
7682 lpfc_mbx_set_feature_UERP,
7683 &mboxq->u.mqe.un.set_feature);
7684 }
7685 }
7686
7687 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7688 /* Enable MDS Diagnostics only if the SLI Port supports it */
7689 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7690 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7691 if (rc != MBX_SUCCESS)
7692 phba->mds_diags_support = 0;
7693 }
James Smart572709e2013-07-15 18:32:43 -04007694
James Smartda0436e2009-05-22 14:51:39 -04007695 /*
7696 * Discover the port's supported feature set and match it against the
7697 * hosts requests.
7698 */
7699 lpfc_request_features(phba, mboxq);
7700 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7701 if (unlikely(rc)) {
7702 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05007703 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04007704 }
7705
Gaurav Srivastava5e633302021-06-08 10:05:49 +05307706 /* Disable VMID if app header is not supported */
7707 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
7708 &mqe->un.req_ftrs))) {
7709 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
7710 phba->cfg_vmid_app_header = 0;
7711 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
7712 "1242 vmid feature not supported\n");
7713 }
7714
James Smartda0436e2009-05-22 14:51:39 -04007715 /*
7716 * The port must support FCP initiator mode as this is the
7717 * only mode running in the host.
7718 */
7719 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7720 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7721 "0378 No support for fcpi mode.\n");
7722 ftr_rsp++;
7723 }
James Smart0bc2b7c2018-02-22 08:18:48 -08007724
7725 /* Performance Hints are ONLY for FCoE */
7726 if (phba->hba_flag & HBA_FCOE_MODE) {
7727 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7728 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7729 else
7730 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7731 }
7732
James Smartda0436e2009-05-22 14:51:39 -04007733 /*
7734 * If the port cannot support the host's requested features
7735 * then turn off the global config parameters to disable the
7736 * feature in the driver. This is not a fatal error.
7737 */
James Smartf44ac122018-03-05 12:04:08 -08007738 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7739 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7740 phba->cfg_enable_bg = 0;
7741 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
James Smartbf086112011-08-21 21:48:13 -04007742 ftr_rsp++;
James Smartf44ac122018-03-05 12:04:08 -08007743 }
James Smartbf086112011-08-21 21:48:13 -04007744 }
James Smartda0436e2009-05-22 14:51:39 -04007745
7746 if (phba->max_vpi && phba->cfg_enable_npiv &&
7747 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7748 ftr_rsp++;
7749
7750 if (ftr_rsp) {
7751 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7752 "0379 Feature Mismatch Data: x%08x %08x "
7753 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7754 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7755 phba->cfg_enable_npiv, phba->max_vpi);
7756 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7757 phba->cfg_enable_bg = 0;
7758 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7759 phba->cfg_enable_npiv = 0;
7760 }
7761
7762 /* These SLI3 features are assumed in SLI4 */
7763 spin_lock_irq(&phba->hbalock);
7764 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7765 spin_unlock_irq(&phba->hbalock);
7766
James Smart171f6c42019-11-04 16:57:07 -08007767 /* Always try to enable dual dump feature if we can */
7768 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7769 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7770 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7771 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
Dick Kennedy372c1872020-06-30 14:50:00 -07007772 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart171f6c42019-11-04 16:57:07 -08007773 "6448 Dual Dump is enabled\n");
7774 else
7775 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7776 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7777 "rc:x%x dd:x%x\n",
7778 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7779 lpfc_sli_config_mbox_subsys_get(
7780 phba, mboxq),
7781 lpfc_sli_config_mbox_opcode_get(
7782 phba, mboxq),
7783 rc, dd);
James Smart6d368e52011-05-24 11:44:12 -04007784 /*
7785 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7786 * calls depends on these resources to complete port setup.
7787 */
7788 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7789 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6d368e52011-05-24 11:44:12 -04007791 "2920 Failed to alloc Resource IDs "
7792 "rc = x%x\n", rc);
7793 goto out_free_mbox;
7794 }
7795
James Smart61bda8f2016-10-13 15:06:05 -07007796 lpfc_set_host_data(phba, mboxq);
7797
7798 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7799 if (rc) {
7800 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7801 "2134 Failed to set host os driver version %x",
7802 rc);
7803 }
7804
James Smartda0436e2009-05-22 14:51:39 -04007805 /* Read the port's service parameters. */
James Smart9f1177a2010-02-26 14:12:57 -05007806 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7807 if (rc) {
7808 phba->link_state = LPFC_HBA_ERROR;
7809 rc = -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -05007810 goto out_free_mbox;
James Smart9f1177a2010-02-26 14:12:57 -05007811 }
7812
James Smartda0436e2009-05-22 14:51:39 -04007813 mboxq->vport = vport;
7814 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
James Smart3e1f0712018-11-29 16:09:29 -08007815 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
James Smartda0436e2009-05-22 14:51:39 -04007816 if (rc == MBX_SUCCESS) {
7817 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7818 rc = 0;
7819 }
7820
7821 /*
7822 * This memory was allocated by the lpfc_read_sparam routine. Release
7823 * it to the mbuf pool.
7824 */
7825 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7826 kfree(mp);
James Smart3e1f0712018-11-29 16:09:29 -08007827 mboxq->ctx_buf = NULL;
James Smartda0436e2009-05-22 14:51:39 -04007828 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007829 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04007830 "0382 READ_SPARAM command failed "
7831 "status %d, mbxStatus x%x\n",
7832 rc, bf_get(lpfc_mqe_status, mqe));
7833 phba->link_state = LPFC_HBA_ERROR;
7834 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05007835 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04007836 }
7837
James Smart05580562011-05-24 11:40:48 -04007838 lpfc_update_vport_wwn(vport);
James Smartda0436e2009-05-22 14:51:39 -04007839
7840 /* Update the fc_host data structures with new wwn. */
7841 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7842 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7843
James Smart895427b2017-02-12 13:52:30 -08007844 /* Create all the SLI4 queues */
7845 rc = lpfc_sli4_queue_create(phba);
7846 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08007848 "3089 Failed to allocate queues\n");
7849 rc = -ENODEV;
7850 goto out_free_mbox;
7851 }
7852 /* Set up all the queues to the device */
7853 rc = lpfc_sli4_queue_setup(phba);
7854 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007855 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08007856 "0381 Error %d during queue setup.\n ", rc);
7857 goto out_stop_timers;
7858 }
7859 /* Initialize the driver internal SLI layer lists. */
7860 lpfc_sli4_setup(phba);
7861 lpfc_sli4_queue_init(phba);
7862
7863 /* update host els xri-sgl sizes and mappings */
7864 rc = lpfc_sli4_els_sgl_update(phba);
James Smart8a9d2e82012-05-09 21:16:12 -04007865 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007866 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart8a9d2e82012-05-09 21:16:12 -04007867 "1400 Failed to update xri-sgl size and "
7868 "mapping: %d\n", rc);
James Smart895427b2017-02-12 13:52:30 -08007869 goto out_destroy_queue;
James Smartda0436e2009-05-22 14:51:39 -04007870 }
7871
James Smart8a9d2e82012-05-09 21:16:12 -04007872 /* register the els sgl pool to the port */
James Smart895427b2017-02-12 13:52:30 -08007873 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7874 phba->sli4_hba.els_xri_cnt);
7875 if (unlikely(rc < 0)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007876 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart8a9d2e82012-05-09 21:16:12 -04007877 "0582 Error %d during els sgl post "
7878 "operation\n", rc);
7879 rc = -ENODEV;
James Smart895427b2017-02-12 13:52:30 -08007880 goto out_destroy_queue;
7881 }
7882 phba->sli4_hba.els_xri_cnt = rc;
7883
James Smartf358dd02017-02-12 13:52:34 -08007884 if (phba->nvmet_support) {
7885 /* update host nvmet xri-sgl sizes and mappings */
7886 rc = lpfc_sli4_nvmet_sgl_update(phba);
7887 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007888 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartf358dd02017-02-12 13:52:34 -08007889 "6308 Failed to update nvmet-sgl size "
7890 "and mapping: %d\n", rc);
7891 goto out_destroy_queue;
7892 }
7893
7894 /* register the nvmet sgl pool to the port */
7895 rc = lpfc_sli4_repost_sgl_list(
7896 phba,
7897 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7898 phba->sli4_hba.nvmet_xri_cnt);
7899 if (unlikely(rc < 0)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007900 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartf358dd02017-02-12 13:52:34 -08007901 "3117 Error %d during nvmet "
7902 "sgl post\n", rc);
7903 rc = -ENODEV;
7904 goto out_destroy_queue;
7905 }
7906 phba->sli4_hba.nvmet_xri_cnt = rc;
James Smart6c621a22017-05-15 15:20:45 -07007907
James Smarta5f73372019-09-21 20:58:50 -07007908 /* We allocate an iocbq for every receive context SGL.
7909 * The additional allocation is for abort and ls handling.
7910 */
7911 cnt = phba->sli4_hba.nvmet_xri_cnt +
7912 phba->sli4_hba.max_cfg_param.max_xri;
James Smartf358dd02017-02-12 13:52:34 -08007913 } else {
James Smart0794d602019-01-28 11:14:19 -08007914 /* update host common xri-sgl sizes and mappings */
James Smart5e5b5112019-01-28 11:14:22 -08007915 rc = lpfc_sli4_io_sgl_update(phba);
James Smart895427b2017-02-12 13:52:30 -08007916 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007917 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08007918 "6082 Failed to update nvme-sgl size "
7919 "and mapping: %d\n", rc);
7920 goto out_destroy_queue;
7921 }
James Smart6c621a22017-05-15 15:20:45 -07007922
James Smart0794d602019-01-28 11:14:19 -08007923 /* register the allocated common sgl pool to the port */
James Smart5e5b5112019-01-28 11:14:22 -08007924 rc = lpfc_sli4_repost_io_sgl_list(phba);
James Smart0794d602019-01-28 11:14:19 -08007925 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007926 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0794d602019-01-28 11:14:19 -08007927 "6116 Error %d during nvme sgl post "
7928 "operation\n", rc);
7929 /* Some NVME buffers were moved to abort nvme list */
7930 /* A pci function reset will repost them */
7931 rc = -ENODEV;
7932 goto out_destroy_queue;
7933 }
James Smarta5f73372019-09-21 20:58:50 -07007934 /* Each lpfc_io_buf job structure has an iocbq element.
7935 * This cnt provides for abort, els, ct and ls requests.
7936 */
7937 cnt = phba->sli4_hba.max_cfg_param.max_xri;
James Smart11e644e2017-06-15 22:56:48 -07007938 }
7939
7940 if (!phba->sli.iocbq_lookup) {
James Smart6c621a22017-05-15 15:20:45 -07007941 /* Initialize and populate the iocb list per host */
7942 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarta5f73372019-09-21 20:58:50 -07007943 "2821 initialize iocb list with %d entries\n",
7944 cnt);
James Smart6c621a22017-05-15 15:20:45 -07007945 rc = lpfc_init_iocb_list(phba, cnt);
7946 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart11e644e2017-06-15 22:56:48 -07007948 "1413 Failed to init iocb list.\n");
James Smart6c621a22017-05-15 15:20:45 -07007949 goto out_destroy_queue;
7950 }
James Smart8a9d2e82012-05-09 21:16:12 -04007951 }
7952
James Smart11e644e2017-06-15 22:56:48 -07007953 if (phba->nvmet_support)
7954 lpfc_nvmet_create_targetport(phba);
7955
James Smart2d7dbc42017-02-12 13:52:35 -08007956 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
James Smart2d7dbc42017-02-12 13:52:35 -08007957 /* Post initial buffers to all RQs created */
7958 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7959 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7960 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7961 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7962 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
James Smart61f3d4b2017-05-15 15:20:41 -07007963 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
James Smart2d7dbc42017-02-12 13:52:35 -08007964 rqbp->buffer_count = 0;
7965
James Smart2d7dbc42017-02-12 13:52:35 -08007966 lpfc_post_rq_buffer(
7967 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7968 phba->sli4_hba.nvmet_mrq_data[i],
James Smart2448e482018-04-09 14:24:24 -07007969 phba->cfg_nvmet_mrq_post, i);
James Smart2d7dbc42017-02-12 13:52:35 -08007970 }
7971 }
7972
James Smartda0436e2009-05-22 14:51:39 -04007973 /* Post the rpi header region to the device. */
7974 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7975 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04007977 "0393 Error %d during rpi post operation\n",
7978 rc);
7979 rc = -ENODEV;
James Smart5aa615d2021-05-14 12:55:56 -07007980 goto out_free_iocblist;
James Smartda0436e2009-05-22 14:51:39 -04007981 }
James Smart97f2ecf2012-03-01 22:35:23 -05007982 lpfc_sli4_node_prep(phba);
James Smartda0436e2009-05-22 14:51:39 -04007983
James Smart895427b2017-02-12 13:52:30 -08007984 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
James Smart2d7dbc42017-02-12 13:52:35 -08007985 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
James Smart895427b2017-02-12 13:52:30 -08007986 /*
7987 * The FC Port needs to register FCFI (index 0)
7988 */
7989 lpfc_reg_fcfi(phba, mboxq);
7990 mboxq->vport = phba->pport;
7991 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7992 if (rc != MBX_SUCCESS)
7993 goto out_unset_queue;
7994 rc = 0;
7995 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7996 &mboxq->u.mqe.un.reg_fcfi);
James Smart2d7dbc42017-02-12 13:52:35 -08007997 } else {
7998 /* We are a NVME Target mode with MRQ > 1 */
7999
8000 /* First register the FCFI */
8001 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8002 mboxq->vport = phba->pport;
8003 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8004 if (rc != MBX_SUCCESS)
8005 goto out_unset_queue;
8006 rc = 0;
8007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8008 &mboxq->u.mqe.un.reg_fcfi_mrq);
8009
8010 /* Next register the MRQs */
8011 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8012 mboxq->vport = phba->pport;
8013 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8014 if (rc != MBX_SUCCESS)
8015 goto out_unset_queue;
8016 rc = 0;
James Smart895427b2017-02-12 13:52:30 -08008017 }
8018 /* Check if the port is configured to be disabled */
8019 lpfc_sli_read_link_ste(phba);
James Smartda0436e2009-05-22 14:51:39 -04008020 }
8021
James Smartc4908502019-01-28 11:14:28 -08008022 /* Don't post more new bufs if repost already recovered
8023 * the nvme sgls.
8024 */
8025 if (phba->nvmet_support == 0) {
8026 if (phba->sli4_hba.io_xri_cnt == 0) {
8027 len = lpfc_new_io_buf(
8028 phba, phba->sli4_hba.io_xri_max);
8029 if (len == 0) {
8030 rc = -ENOMEM;
8031 goto out_unset_queue;
8032 }
8033
8034 if (phba->cfg_xri_rebalancing)
8035 lpfc_create_multixri_pools(phba);
8036 }
8037 } else {
8038 phba->cfg_xri_rebalancing = 0;
8039 }
8040
James Smartda0436e2009-05-22 14:51:39 -04008041 /* Allow asynchronous mailbox command to go through */
8042 spin_lock_irq(&phba->hbalock);
8043 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8044 spin_unlock_irq(&phba->hbalock);
8045
8046 /* Post receive buffers to the device */
8047 lpfc_sli4_rb_setup(phba);
8048
James Smartfc2b9892010-02-26 14:15:29 -05008049 /* Reset HBA FCF states after HBA reset */
8050 phba->fcf.fcf_flag = 0;
8051 phba->fcf.current_rec.flag = 0;
8052
James Smartda0436e2009-05-22 14:51:39 -04008053 /* Start the ELS watchdog timer */
James Smart8fa38512009-07-19 10:01:03 -04008054 mod_timer(&vport->els_tmofunc,
James Smart256ec0d2013-04-17 20:14:58 -04008055 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
James Smartda0436e2009-05-22 14:51:39 -04008056
8057 /* Start heart beat timer */
8058 mod_timer(&phba->hb_tmofunc,
James Smart256ec0d2013-04-17 20:14:58 -04008059 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
James Smarta22d73b2021-01-04 10:02:38 -08008060 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
James Smartda0436e2009-05-22 14:51:39 -04008061 phba->last_completion_time = jiffies;
8062
James Smart32517fc2019-01-28 11:14:33 -08008063 /* start eq_delay heartbeat */
8064 if (phba->cfg_auto_imax)
8065 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8066 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8067
Dick Kennedy317aeb82020-06-30 14:49:59 -07008068 /* start per phba idle_stat_delay heartbeat */
8069 lpfc_init_idle_stat_hb(phba);
8070
James Smartda0436e2009-05-22 14:51:39 -04008071 /* Start error attention (ERATT) polling timer */
James Smart256ec0d2013-04-17 20:14:58 -04008072 mod_timer(&phba->eratt_poll,
James Smart65791f12016-07-06 12:35:56 -07008073 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
James Smartda0436e2009-05-22 14:51:39 -04008074
James Smart75baf692010-06-08 18:31:21 -04008075 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8076 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8077 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8078 if (!rc) {
8079 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8080 "2829 This device supports "
8081 "Advanced Error Reporting (AER)\n");
8082 spin_lock_irq(&phba->hbalock);
8083 phba->hba_flag |= HBA_AER_ENABLED;
8084 spin_unlock_irq(&phba->hbalock);
8085 } else {
8086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8087 "2830 This device does not support "
8088 "Advanced Error Reporting (AER)\n");
8089 phba->cfg_aer_support = 0;
8090 }
James Smart0a96e972011-07-22 18:37:28 -04008091 rc = 0;
James Smart75baf692010-06-08 18:31:21 -04008092 }
8093
James Smartda0436e2009-05-22 14:51:39 -04008094 /*
8095 * The port is ready, set the host's link state to LINK_DOWN
8096 * in preparation for link interrupts.
8097 */
James Smartda0436e2009-05-22 14:51:39 -04008098 spin_lock_irq(&phba->hbalock);
8099 phba->link_state = LPFC_LINK_DOWN;
James Smart1dc5ec22018-10-23 13:41:11 -07008100
8101 /* Check if physical ports are trunked */
8102 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8103 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8104 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8105 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8106 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8107 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8108 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8109 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
James Smartda0436e2009-05-22 14:51:39 -04008110 spin_unlock_irq(&phba->hbalock);
James Smart1dc5ec22018-10-23 13:41:11 -07008111
James Smarte8869f52019-03-12 16:30:18 -07008112 /* Arm the CQs and then EQs on device */
8113 lpfc_sli4_arm_cqeq_intr(phba);
8114
8115 /* Indicate device interrupt mode */
8116 phba->sli4_hba.intr_enable = 1;
8117
James Smart026abb82011-12-13 13:20:45 -05008118 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8119 (phba->hba_flag & LINK_DISABLED)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart026abb82011-12-13 13:20:45 -05008121 "3103 Adapter Link is disabled.\n");
8122 lpfc_down_link(phba, mboxq);
8123 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8124 if (rc != MBX_SUCCESS) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart026abb82011-12-13 13:20:45 -05008126 "3104 Adapter failed to issue "
8127 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
James Smartc4908502019-01-28 11:14:28 -08008128 goto out_io_buff_free;
James Smart026abb82011-12-13 13:20:45 -05008129 }
8130 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
James Smart1b511972011-12-13 13:23:09 -05008131 /* don't perform init_link on SLI4 FC port loopback test */
8132 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8133 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8134 if (rc)
James Smartc4908502019-01-28 11:14:28 -08008135 goto out_io_buff_free;
James Smart1b511972011-12-13 13:23:09 -05008136 }
James Smart5350d872011-10-10 21:33:49 -04008137 }
8138 mempool_free(mboxq, phba->mbox_mem_pool);
8139 return rc;
James Smartc4908502019-01-28 11:14:28 -08008140out_io_buff_free:
8141 /* Free allocated IO Buffers */
8142 lpfc_io_free(phba);
James Smart76a95d72010-11-20 23:11:48 -05008143out_unset_queue:
James Smartda0436e2009-05-22 14:51:39 -04008144 /* Unset all the queues set up in this routine when error out */
James Smart5350d872011-10-10 21:33:49 -04008145 lpfc_sli4_queue_unset(phba);
James Smart5aa615d2021-05-14 12:55:56 -07008146out_free_iocblist:
James Smart6c621a22017-05-15 15:20:45 -07008147 lpfc_free_iocb_list(phba);
James Smart5aa615d2021-05-14 12:55:56 -07008148out_destroy_queue:
James Smart5350d872011-10-10 21:33:49 -04008149 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -04008150out_stop_timers:
James Smart5350d872011-10-10 21:33:49 -04008151 lpfc_stop_hba_timers(phba);
James Smartda0436e2009-05-22 14:51:39 -04008152out_free_mbox:
8153 mempool_free(mboxq, phba->mbox_mem_pool);
8154 return rc;
8155}
James Smarte59058c2008-08-24 21:49:00 -04008156
8157/**
James Smart3621a712009-04-06 18:47:14 -04008158 * lpfc_mbox_timeout - Timeout call back function for mbox timer
Lee Jones7af29d42020-07-21 17:41:31 +01008159 * @t: Context to fetch pointer to hba structure from.
dea31012005-04-17 16:05:31 -05008160 *
James Smarte59058c2008-08-24 21:49:00 -04008161 * This is the callback function for mailbox timer. The mailbox
8162 * timer is armed when a new mailbox command is issued and the timer
8163 * is deleted when the mailbox complete. The function is called by
8164 * the kernel timer code when a mailbox does not complete within
8165 * expected time. This function wakes up the worker thread to
8166 * process the mailbox timeout and returns. All the processing is
8167 * done by the worker thread function lpfc_mbox_timeout_handler.
8168 **/
dea31012005-04-17 16:05:31 -05008169void
Kees Cookf22eb4d2017-09-06 20:24:26 -07008170lpfc_mbox_timeout(struct timer_list *t)
dea31012005-04-17 16:05:31 -05008171{
Kees Cookf22eb4d2017-09-06 20:24:26 -07008172 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
dea31012005-04-17 16:05:31 -05008173 unsigned long iflag;
James Smart2e0fef82007-06-17 19:56:36 -05008174 uint32_t tmo_posted;
dea31012005-04-17 16:05:31 -05008175
James Smart2e0fef82007-06-17 19:56:36 -05008176 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -05008177 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
James Smart2e0fef82007-06-17 19:56:36 -05008178 if (!tmo_posted)
8179 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8181
James Smart5e9d9b82008-06-14 22:52:53 -04008182 if (!tmo_posted)
8183 lpfc_worker_wake_up(phba);
8184 return;
dea31012005-04-17 16:05:31 -05008185}
8186
James Smarte8d3c3b2013-10-10 12:21:30 -04008187/**
8188 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8189 * are pending
8190 * @phba: Pointer to HBA context object.
8191 *
8192 * This function checks if any mailbox completions are present on the mailbox
8193 * completion queue.
8194 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{

	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	/* Only meaningful on SLI-4 hardware; guard against a bad call. */
	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	/* Snapshot the current valid-bit phase; it flips on each queue wrap. */
	qe_valid = mcq->qe_valid;
	/* Walk CQEs from hba_index while their valid bit matches this phase. */
	while (bf_get_le32(lpfc_cqe_valid,
	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
		/*
		 * A "completed" entry that is not an async event is a real
		 * mailbox command completion waiting to be processed.
		 */
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		/* Scanned the whole ring back to the start point - stop. */
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;

}
8232
8233/**
8234 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8235 * that were missed.
8236 * @phba: Pointer to HBA context object.
8237 *
8238 * For sli4, it is possible to miss an interrupt. As such mbox completions
8239 * maybe missed causing erroneous mailbox timeouts to occur. This function
8240 * checks to see if mbox completions are on the mailbox completion queue
8241 * and will process all the completions associated with the eq for the
8242 * mailbox completion queue.
8243 **/
static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_queue *eq;
	bool mbox_pending;

	/* Missed-completion recovery only applies to SLI-4 ports. */
	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the EQ associated with the mbox CQ */
	if (sli4_hba->hdwq) {
		/* Match the mbox CQ's parent queue id against each hdw EQ. */
		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
				fpeq = eq;
				break;
			}
		}
	}
	/* No owning EQ found - nothing we can poll. */
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		/* process and rearm the EQ */
		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
	else
		/* Always clear and re-arm the EQ */
		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);

	/* Caller uses this to tell a stale timeout from a real hang. */
	return mbox_pending;

}
James Smarte59058c2008-08-24 21:49:00 -04008294
8295/**
James Smart3621a712009-04-06 18:47:14 -04008296 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
James Smarte59058c2008-08-24 21:49:00 -04008297 * @phba: Pointer to HBA context object.
8298 *
8299 * This function is called from worker thread when a mailbox command times out.
8300 * The caller is not required to hold any locks. This function will reset the
8301 * HBA and recover all the pending commands.
8302 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	/* Snapshot the active mailbox; it can complete concurrently. */
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion */
	lpfc_sli4_process_missed_mbox_completions(phba);

	/* SLI layer no longer active (e.g. already being torn down). */
	if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		/* Mailbox completed before we got here - nothing to do. */
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	/* Clear the pending timeout event so it is not re-processed. */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
8361
James Smarte59058c2008-08-24 21:49:00 -04008362/**
James Smart3772a992009-05-22 14:50:54 -04008363 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
James Smarte59058c2008-08-24 21:49:00 -04008364 * @phba: Pointer to HBA context object.
8365 * @pmbox: Pointer to mailbox object.
8366 * @flag: Flag indicating how the mailbox need to be processed.
8367 *
8368 * This function is called by discovery code and HBA management code
James Smart3772a992009-05-22 14:50:54 -04008369 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8370 * function gets the hbalock to protect the data structures.
James Smarte59058c2008-08-24 21:49:00 -04008371 * The mailbox command can be submitted in polling mode, in which case
8372 * this function will wait in a polling loop for the completion of the
8373 * mailbox.
8374 * If the mailbox is submitted in no_wait mode (not polling) the
8375 * function will submit the command and returns immediately without waiting
8376 * for the mailbox completion. The no_wait is supported only when HBA
8377 * is in SLI2/SLI3 mode - interrupts are enabled.
8378 * The SLI interface allows only one mailbox pending at a time. If the
8379 * mailbox is issued in polling mode and there is already a mailbox
8380 * pending, then the function will return an error. If the mailbox is issued
8381 * in NO_WAIT mode and there is a mailbox pending already, the function
8382 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8383 * The sli layer owns the mailbox object until the completion of mailbox
8384 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
8385 * return codes the caller owns the mailbox command after the return of
8386 * the function.
8387 **/
James Smart3772a992009-05-22 14:50:54 -04008388static int
8389lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8390 uint32_t flag)
dea31012005-04-17 16:05:31 -05008391{
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008392 MAILBOX_t *mbx;
James Smart2e0fef82007-06-17 19:56:36 -05008393 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05008394 uint32_t status, evtctr;
James Smart9940b972011-03-11 16:06:12 -05008395 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -05008396 int i;
James Smart09372822008-01-11 01:52:54 -05008397 unsigned long timeout;
dea31012005-04-17 16:05:31 -05008398 unsigned long drvr_flag = 0;
James Smart34b02dc2008-08-24 21:49:55 -04008399 uint32_t word0, ldata;
dea31012005-04-17 16:05:31 -05008400 void __iomem *to_slim;
James Smart58da1ff2008-04-07 10:15:56 -04008401 int processing_queue = 0;
8402
8403 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8404 if (!pmbox) {
James Smart8568a4d2009-07-19 10:01:16 -04008405 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart58da1ff2008-04-07 10:15:56 -04008406 /* processing mbox queue from intr_handler */
James Smart3772a992009-05-22 14:50:54 -04008407 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8408 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8409 return MBX_SUCCESS;
8410 }
James Smart58da1ff2008-04-07 10:15:56 -04008411 processing_queue = 1;
James Smart58da1ff2008-04-07 10:15:56 -04008412 pmbox = lpfc_mbox_get(phba);
8413 if (!pmbox) {
8414 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8415 return MBX_SUCCESS;
8416 }
8417 }
dea31012005-04-17 16:05:31 -05008418
James Smarted957682007-06-17 19:56:37 -05008419 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
James Smart92d7f7b2007-06-17 19:56:38 -05008420 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
James Smarted957682007-06-17 19:56:37 -05008421 if(!pmbox->vport) {
James Smart58da1ff2008-04-07 10:15:56 -04008422 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
James Smarted957682007-06-17 19:56:37 -05008423 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05008424 LOG_MBOX | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04008425 "1806 Mbox x%x failed. No vport\n",
James Smart3772a992009-05-22 14:50:54 -04008426 pmbox->u.mb.mbxCommand);
James Smarted957682007-06-17 19:56:37 -05008427 dump_stack();
James Smart58da1ff2008-04-07 10:15:56 -04008428 goto out_not_finished;
James Smarted957682007-06-17 19:56:37 -05008429 }
8430 }
8431
Linas Vepstas8d63f372007-02-14 14:28:36 -06008432 /* If the PCI channel is in offline state, do not post mbox. */
James Smart58da1ff2008-04-07 10:15:56 -04008433 if (unlikely(pci_channel_offline(phba->pcidev))) {
8434 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8435 goto out_not_finished;
8436 }
Linas Vepstas8d63f372007-02-14 14:28:36 -06008437
James Smarta257bf92009-04-06 18:48:10 -04008438 /* If HBA has a deferred error attention, fail the iocb. */
8439 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8440 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8441 goto out_not_finished;
8442 }
8443
dea31012005-04-17 16:05:31 -05008444 psli = &phba->sli;
James Smart92d7f7b2007-06-17 19:56:38 -05008445
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008446 mbx = &pmbox->u.mb;
dea31012005-04-17 16:05:31 -05008447 status = MBX_SUCCESS;
8448
James Smart2e0fef82007-06-17 19:56:36 -05008449 if (phba->link_state == LPFC_HBA_ERROR) {
8450 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05008451
8452 /* Mbox command <mbxCommand> cannot issue */
Dick Kennedy372c1872020-06-30 14:50:00 -07008453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -04008454 "(%d):0311 Mailbox command x%x cannot "
8455 "issue Data: x%x x%x\n",
8456 pmbox->vport ? pmbox->vport->vpi : 0,
8457 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04008458 goto out_not_finished;
Jamie Wellnitz41415862006-02-28 19:25:27 -05008459 }
8460
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008461 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
James Smart9940b972011-03-11 16:06:12 -05008462 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8463 !(hc_copy & HC_MBINT_ENA)) {
8464 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
Dick Kennedy372c1872020-06-30 14:50:00 -07008465 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -04008466 "(%d):2528 Mailbox command x%x cannot "
8467 "issue Data: x%x x%x\n",
8468 pmbox->vport ? pmbox->vport->vpi : 0,
8469 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart9940b972011-03-11 16:06:12 -05008470 goto out_not_finished;
8471 }
James Smart92908312006-03-07 15:04:13 -05008472 }
8473
dea31012005-04-17 16:05:31 -05008474 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8475 /* Polling for a mbox command when another one is already active
8476 * is not allowed in SLI. Also, the driver must have established
8477 * SLI2 mode to queue and process multiple mbox commands.
8478 */
8479
8480 if (flag & MBX_POLL) {
James Smart2e0fef82007-06-17 19:56:36 -05008481 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05008482
8483 /* Mbox command <mbxCommand> cannot issue */
Dick Kennedy372c1872020-06-30 14:50:00 -07008484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -04008485 "(%d):2529 Mailbox command x%x "
8486 "cannot issue Data: x%x x%x\n",
8487 pmbox->vport ? pmbox->vport->vpi : 0,
8488 pmbox->u.mb.mbxCommand,
8489 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04008490 goto out_not_finished;
dea31012005-04-17 16:05:31 -05008491 }
8492
James Smart3772a992009-05-22 14:50:54 -04008493 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
James Smart2e0fef82007-06-17 19:56:36 -05008494 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05008495 /* Mbox command <mbxCommand> cannot issue */
Dick Kennedy372c1872020-06-30 14:50:00 -07008496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -04008497 "(%d):2530 Mailbox command x%x "
8498 "cannot issue Data: x%x x%x\n",
8499 pmbox->vport ? pmbox->vport->vpi : 0,
8500 pmbox->u.mb.mbxCommand,
8501 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04008502 goto out_not_finished;
dea31012005-04-17 16:05:31 -05008503 }
8504
dea31012005-04-17 16:05:31 -05008505 /* Another mailbox command is still being processed, queue this
8506 * command to be processed later.
8507 */
8508 lpfc_mbox_put(phba, pmbox);
8509
8510 /* Mbox cmd issue - BUSY */
James Smarted957682007-06-17 19:56:37 -05008511 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04008512 "(%d):0308 Mbox cmd issue - BUSY Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05008513 "x%x x%x x%x x%x\n",
James Smart92d7f7b2007-06-17 19:56:38 -05008514 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
James Smarte92974f2017-06-01 21:07:06 -07008515 mbx->mbxCommand,
8516 phba->pport ? phba->pport->port_state : 0xff,
James Smart92d7f7b2007-06-17 19:56:38 -05008517 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05008518
8519 psli->slistat.mbox_busy++;
James Smart2e0fef82007-06-17 19:56:36 -05008520 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05008521
James Smart858c9f62007-06-17 19:56:39 -05008522 if (pmbox->vport) {
8523 lpfc_debugfs_disc_trc(pmbox->vport,
8524 LPFC_DISC_TRC_MBOX_VPORT,
8525 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008526 (uint32_t)mbx->mbxCommand,
8527 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05008528 }
8529 else {
8530 lpfc_debugfs_disc_trc(phba->pport,
8531 LPFC_DISC_TRC_MBOX,
8532 "MBOX Bsy: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008533 (uint32_t)mbx->mbxCommand,
8534 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05008535 }
8536
James Smart2e0fef82007-06-17 19:56:36 -05008537 return MBX_BUSY;
dea31012005-04-17 16:05:31 -05008538 }
8539
dea31012005-04-17 16:05:31 -05008540 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8541
8542 /* If we are not polling, we MUST be in SLI2 mode */
8543 if (flag != MBX_POLL) {
James Smart3772a992009-05-22 14:50:54 -04008544 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008545 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea31012005-04-17 16:05:31 -05008546 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05008547 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05008548 /* Mbox command <mbxCommand> cannot issue */
Dick Kennedy372c1872020-06-30 14:50:00 -07008549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -04008550 "(%d):2531 Mailbox command x%x "
8551 "cannot issue Data: x%x x%x\n",
8552 pmbox->vport ? pmbox->vport->vpi : 0,
8553 pmbox->u.mb.mbxCommand,
8554 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04008555 goto out_not_finished;
dea31012005-04-17 16:05:31 -05008556 }
8557 /* timeout active mbox command */
James Smart256ec0d2013-04-17 20:14:58 -04008558 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8559 1000);
8560 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea31012005-04-17 16:05:31 -05008561 }
8562
8563 /* Mailbox cmd <cmd> issue */
James Smarted957682007-06-17 19:56:37 -05008564 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04008565 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05008566 "x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04008567 pmbox->vport ? pmbox->vport->vpi : 0,
James Smarte92974f2017-06-01 21:07:06 -07008568 mbx->mbxCommand,
8569 phba->pport ? phba->pport->port_state : 0xff,
James Smart92d7f7b2007-06-17 19:56:38 -05008570 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05008571
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008572 if (mbx->mbxCommand != MBX_HEARTBEAT) {
James Smart858c9f62007-06-17 19:56:39 -05008573 if (pmbox->vport) {
8574 lpfc_debugfs_disc_trc(pmbox->vport,
8575 LPFC_DISC_TRC_MBOX_VPORT,
8576 "MBOX Send vport: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008577 (uint32_t)mbx->mbxCommand,
8578 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05008579 }
8580 else {
8581 lpfc_debugfs_disc_trc(phba->pport,
8582 LPFC_DISC_TRC_MBOX,
8583 "MBOX Send: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008584 (uint32_t)mbx->mbxCommand,
8585 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05008586 }
8587 }
8588
dea31012005-04-17 16:05:31 -05008589 psli->slistat.mbox_cmd++;
8590 evtctr = psli->slistat.mbox_event;
8591
8592 /* next set own bit for the adapter and copy over command word */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008593 mbx->mbxOwner = OWN_CHIP;
dea31012005-04-17 16:05:31 -05008594
James Smart3772a992009-05-22 14:50:54 -04008595 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
James Smart7a470272010-03-15 11:25:20 -04008596 /* Populate mbox extension offset word. */
8597 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008598 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
James Smart7a470272010-03-15 11:25:20 -04008599 = (uint8_t *)phba->mbox_ext
8600 - (uint8_t *)phba->mbox;
8601 }
8602
8603 /* Copy the mailbox extension data */
James Smart3e1f0712018-11-29 16:09:29 -08008604 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8605 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8606 (uint8_t *)phba->mbox_ext,
8607 pmbox->in_ext_byte_len);
James Smart7a470272010-03-15 11:25:20 -04008608 }
8609 /* Copy command data to host SLIM area */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008610 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05008611 } else {
James Smart7a470272010-03-15 11:25:20 -04008612 /* Populate mbox extension offset word. */
8613 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008614 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
James Smart7a470272010-03-15 11:25:20 -04008615 = MAILBOX_HBA_EXT_OFFSET;
8616
8617 /* Copy the mailbox extension data */
James Smart3e1f0712018-11-29 16:09:29 -08008618 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
James Smart7a470272010-03-15 11:25:20 -04008619 lpfc_memcpy_to_slim(phba->MBslimaddr +
8620 MAILBOX_HBA_EXT_OFFSET,
James Smart3e1f0712018-11-29 16:09:29 -08008621 pmbox->ctx_buf, pmbox->in_ext_byte_len);
James Smart7a470272010-03-15 11:25:20 -04008622
James Smart895427b2017-02-12 13:52:30 -08008623 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea31012005-04-17 16:05:31 -05008624 /* copy command data into host mbox for cmpl */
James Smart895427b2017-02-12 13:52:30 -08008625 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8626 MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05008627
8628 /* First copy mbox command data to HBA SLIM, skip past first
8629 word */
8630 to_slim = phba->MBslimaddr + sizeof (uint32_t);
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008631 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea31012005-04-17 16:05:31 -05008632 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8633
8634 /* Next copy over first word, with mbxOwner set */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008635 ldata = *((uint32_t *)mbx);
dea31012005-04-17 16:05:31 -05008636 to_slim = phba->MBslimaddr;
8637 writel(ldata, to_slim);
8638 readl(to_slim); /* flush */
8639
James Smart895427b2017-02-12 13:52:30 -08008640 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea31012005-04-17 16:05:31 -05008641 /* switch over to host mailbox */
James Smart3772a992009-05-22 14:50:54 -04008642 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05008643 }
8644
8645 wmb();
dea31012005-04-17 16:05:31 -05008646
8647 switch (flag) {
8648 case MBX_NOWAIT:
James Smart09372822008-01-11 01:52:54 -05008649 /* Set up reference to mailbox command */
dea31012005-04-17 16:05:31 -05008650 psli->mbox_active = pmbox;
James Smart09372822008-01-11 01:52:54 -05008651 /* Interrupt board to do it */
8652 writel(CA_MBATT, phba->CAregaddr);
8653 readl(phba->CAregaddr); /* flush */
8654 /* Don't wait for it to finish, just return */
dea31012005-04-17 16:05:31 -05008655 break;
8656
8657 case MBX_POLL:
James Smart09372822008-01-11 01:52:54 -05008658 /* Set up null reference to mailbox command */
dea31012005-04-17 16:05:31 -05008659 psli->mbox_active = NULL;
James Smart09372822008-01-11 01:52:54 -05008660 /* Interrupt board to do it */
8661 writel(CA_MBATT, phba->CAregaddr);
8662 readl(phba->CAregaddr); /* flush */
8663
James Smart3772a992009-05-22 14:50:54 -04008664 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05008665 /* First read mbox status word */
James Smart34b02dc2008-08-24 21:49:55 -04008666 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05008667 word0 = le32_to_cpu(word0);
8668 } else {
8669 /* First read mbox status word */
James Smart9940b972011-03-11 16:06:12 -05008670 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8671 spin_unlock_irqrestore(&phba->hbalock,
8672 drvr_flag);
8673 goto out_not_finished;
8674 }
dea31012005-04-17 16:05:31 -05008675 }
8676
8677 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05008678 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8679 spin_unlock_irqrestore(&phba->hbalock,
8680 drvr_flag);
8681 goto out_not_finished;
8682 }
James Smarta183a152011-10-10 21:32:43 -04008683 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8684 1000) + jiffies;
James Smart09372822008-01-11 01:52:54 -05008685 i = 0;
dea31012005-04-17 16:05:31 -05008686 /* Wait for command to complete */
Jamie Wellnitz41415862006-02-28 19:25:27 -05008687 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8688 (!(ha_copy & HA_MBATT) &&
James Smart2e0fef82007-06-17 19:56:36 -05008689 (phba->link_state > LPFC_WARM_START))) {
James Smart09372822008-01-11 01:52:54 -05008690 if (time_after(jiffies, timeout)) {
dea31012005-04-17 16:05:31 -05008691 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05008692 spin_unlock_irqrestore(&phba->hbalock,
dea31012005-04-17 16:05:31 -05008693 drvr_flag);
James Smart58da1ff2008-04-07 10:15:56 -04008694 goto out_not_finished;
dea31012005-04-17 16:05:31 -05008695 }
8696
8697 /* Check if we took a mbox interrupt while we were
8698 polling */
8699 if (((word0 & OWN_CHIP) != OWN_CHIP)
8700 && (evtctr != psli->slistat.mbox_event))
8701 break;
8702
James Smart09372822008-01-11 01:52:54 -05008703 if (i++ > 10) {
8704 spin_unlock_irqrestore(&phba->hbalock,
8705 drvr_flag);
8706 msleep(1);
8707 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8708 }
dea31012005-04-17 16:05:31 -05008709
James Smart3772a992009-05-22 14:50:54 -04008710 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05008711 /* First copy command data */
James Smart34b02dc2008-08-24 21:49:55 -04008712 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05008713 word0 = le32_to_cpu(word0);
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008714 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea31012005-04-17 16:05:31 -05008715 MAILBOX_t *slimmb;
James Smart34b02dc2008-08-24 21:49:55 -04008716 uint32_t slimword0;
dea31012005-04-17 16:05:31 -05008717 /* Check real SLIM for any errors */
8718 slimword0 = readl(phba->MBslimaddr);
8719 slimmb = (MAILBOX_t *) & slimword0;
8720 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8721 && slimmb->mbxStatus) {
8722 psli->sli_flag &=
James Smart3772a992009-05-22 14:50:54 -04008723 ~LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05008724 word0 = slimword0;
8725 }
8726 }
8727 } else {
8728 /* First copy command data */
8729 word0 = readl(phba->MBslimaddr);
8730 }
8731 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05008732 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8733 spin_unlock_irqrestore(&phba->hbalock,
8734 drvr_flag);
8735 goto out_not_finished;
8736 }
dea31012005-04-17 16:05:31 -05008737 }
8738
James Smart3772a992009-05-22 14:50:54 -04008739 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05008740 /* copy results back to user */
James Smart2ea259e2017-02-12 13:52:27 -08008741 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8742 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04008743 /* Copy the mailbox extension data */
James Smart3e1f0712018-11-29 16:09:29 -08008744 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
James Smart7a470272010-03-15 11:25:20 -04008745 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
James Smart3e1f0712018-11-29 16:09:29 -08008746 pmbox->ctx_buf,
James Smart7a470272010-03-15 11:25:20 -04008747 pmbox->out_ext_byte_len);
8748 }
dea31012005-04-17 16:05:31 -05008749 } else {
8750 /* First copy command data */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008751 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
James Smart2ea259e2017-02-12 13:52:27 -08008752 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04008753 /* Copy the mailbox extension data */
James Smart3e1f0712018-11-29 16:09:29 -08008754 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8755 lpfc_memcpy_from_slim(
8756 pmbox->ctx_buf,
James Smart7a470272010-03-15 11:25:20 -04008757 phba->MBslimaddr +
8758 MAILBOX_HBA_EXT_OFFSET,
8759 pmbox->out_ext_byte_len);
dea31012005-04-17 16:05:31 -05008760 }
8761 }
8762
8763 writel(HA_MBATT, phba->HAregaddr);
8764 readl(phba->HAregaddr); /* flush */
8765
8766 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
Randy Dunlapbf07bde2013-02-22 10:23:54 -08008767 status = mbx->mbxStatus;
dea31012005-04-17 16:05:31 -05008768 }
8769
James Smart2e0fef82007-06-17 19:56:36 -05008770 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8771 return status;
James Smart58da1ff2008-04-07 10:15:56 -04008772
8773out_not_finished:
8774 if (processing_queue) {
James Smartda0436e2009-05-22 14:51:39 -04008775 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
James Smart58da1ff2008-04-07 10:15:56 -04008776 lpfc_mbox_cmpl_put(phba, pmbox);
8777 }
8778 return MBX_NOT_FINISHED;
dea31012005-04-17 16:05:31 -05008779}
8780
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 * 0 - the outstanding mailbox command completed; otherwise, the wait for
 * the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	/* timeout == 0 doubles as "no mailbox was active when we blocked" */
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active; a completion may have
	 * been missed, in which case this reaps it before we poll below.
	 */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Cannot cleanly block async mailbox commands: undo the block
	 * flag set above so posting resumes, and report failure.
	 */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
8835
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout, after timing-out the outstanding mailbox
	 * command shall always be removed, so just unblock posting async
	 * mailbox command and resume
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
8870
8871/**
James Smart2d843ed2012-09-29 11:29:06 -04008872 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8873 * @phba: Pointer to HBA context object.
8874 * @mboxq: Pointer to mailbox object.
8875 *
8876 * The function waits for the bootstrap mailbox register ready bit from
8877 * port for twice the regular mailbox command timeout value.
8878 *
8879 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8880 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8881 **/
8882static int
8883lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8884{
8885 uint32_t db_ready;
8886 unsigned long timeout;
8887 struct lpfc_register bmbx_reg;
8888
8889 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8890 * 1000) + jiffies;
8891
8892 do {
8893 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8894 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8895 if (!db_ready)
James Smarte2ffe4d2019-03-12 16:30:15 -07008896 mdelay(2);
James Smart2d843ed2012-09-29 11:29:06 -04008897
8898 if (time_after(jiffies, timeout))
8899 return MBXERR_ERROR;
8900 } while (!db_ready);
8901
8902 return 0;
8903}
8904
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port. The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 * MBX_SUCCESS - mailbox posted successfully
 * Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register for readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post. Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port.
	 * NOTE(review): low-address write kicks off command execution on the
	 * port per the hi/lo posting protocol above - confirm against SLI4
	 * spec.
	 */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token; no lock is needed for the release itself,
	 * but the flag/pointer pair must be cleared atomically together.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
9035
9036/**
9037 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9038 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +01009039 * @mboxq: Pointer to mailbox object.
James Smartda0436e2009-05-22 14:51:39 -04009040 * @flag: Flag indicating how the mailbox need to be processed.
9041 *
9042 * This function is called by discovery code and HBA management code to submit
9043 * a mailbox command to firmware with SLI-4 interface spec.
9044 *
9045 * Return codes the caller owns the mailbox command after the return of the
9046 * function.
9047 **/
9048static int
9049lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9050 uint32_t flag)
9051{
9052 struct lpfc_sli *psli = &phba->sli;
9053 unsigned long iflags;
9054 int rc;
9055
James Smartb76f2dc2011-07-22 18:37:42 -04009056 /* dump from issue mailbox command if setup */
9057 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9058
James Smart8fa38512009-07-19 10:01:03 -04009059 rc = lpfc_mbox_dev_check(phba);
9060 if (unlikely(rc)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarta183a152011-10-10 21:32:43 -04009062 "(%d):2544 Mailbox command x%x (x%x/x%x) "
James Smart8fa38512009-07-19 10:01:03 -04009063 "cannot issue Data: x%x x%x\n",
9064 mboxq->vport ? mboxq->vport->vpi : 0,
9065 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04009066 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9067 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smart8fa38512009-07-19 10:01:03 -04009068 psli->sli_flag, flag);
9069 goto out_not_finished;
9070 }
9071
James Smartda0436e2009-05-22 14:51:39 -04009072 /* Detect polling mode and jump to a handler */
9073 if (!phba->sli4_hba.intr_enable) {
9074 if (flag == MBX_POLL)
9075 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9076 else
9077 rc = -EIO;
9078 if (rc != MBX_SUCCESS)
James Smart05580562011-05-24 11:40:48 -04009079 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
James Smartda0436e2009-05-22 14:51:39 -04009080 "(%d):2541 Mailbox command x%x "
James Smartcc459f12012-05-09 21:18:30 -04009081 "(x%x/x%x) failure: "
9082 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9083 "Data: x%x x%x\n,",
James Smartda0436e2009-05-22 14:51:39 -04009084 mboxq->vport ? mboxq->vport->vpi : 0,
9085 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04009086 lpfc_sli_config_mbox_subsys_get(phba,
9087 mboxq),
9088 lpfc_sli_config_mbox_opcode_get(phba,
9089 mboxq),
James Smartcc459f12012-05-09 21:18:30 -04009090 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9091 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9092 bf_get(lpfc_mcqe_ext_status,
9093 &mboxq->mcqe),
James Smartda0436e2009-05-22 14:51:39 -04009094 psli->sli_flag, flag);
9095 return rc;
9096 } else if (flag == MBX_POLL) {
James Smartf1126682009-06-10 17:22:44 -04009097 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9098 "(%d):2542 Try to issue mailbox command "
James Smart7365f6f2018-02-22 08:18:46 -08009099 "x%x (x%x/x%x) synchronously ahead of async "
James Smartf1126682009-06-10 17:22:44 -04009100 "mailbox command queue: x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04009101 mboxq->vport ? mboxq->vport->vpi : 0,
9102 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04009103 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9104 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04009105 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04009106 /* Try to block the asynchronous mailbox posting */
9107 rc = lpfc_sli4_async_mbox_block(phba);
9108 if (!rc) {
9109 /* Successfully blocked, now issue sync mbox cmd */
9110 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9111 if (rc != MBX_SUCCESS)
James Smartcc459f12012-05-09 21:18:30 -04009112 lpfc_printf_log(phba, KERN_WARNING,
James Smarta183a152011-10-10 21:32:43 -04009113 LOG_MBOX | LOG_SLI,
James Smartcc459f12012-05-09 21:18:30 -04009114 "(%d):2597 Sync Mailbox command "
9115 "x%x (x%x/x%x) failure: "
9116 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9117 "Data: x%x x%x\n,",
9118 mboxq->vport ? mboxq->vport->vpi : 0,
James Smarta183a152011-10-10 21:32:43 -04009119 mboxq->u.mb.mbxCommand,
9120 lpfc_sli_config_mbox_subsys_get(phba,
9121 mboxq),
9122 lpfc_sli_config_mbox_opcode_get(phba,
9123 mboxq),
James Smartcc459f12012-05-09 21:18:30 -04009124 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9125 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9126 bf_get(lpfc_mcqe_ext_status,
9127 &mboxq->mcqe),
James Smarta183a152011-10-10 21:32:43 -04009128 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04009129 /* Unblock the async mailbox posting afterward */
9130 lpfc_sli4_async_mbox_unblock(phba);
9131 }
9132 return rc;
James Smartda0436e2009-05-22 14:51:39 -04009133 }
9134
Colin Ian King291c2542019-12-18 08:43:01 +00009135 /* Now, interrupt mode asynchronous mailbox command */
James Smartda0436e2009-05-22 14:51:39 -04009136 rc = lpfc_mbox_cmd_check(phba, mboxq);
9137 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009138 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarta183a152011-10-10 21:32:43 -04009139 "(%d):2543 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04009140 "cannot issue Data: x%x x%x\n",
9141 mboxq->vport ? mboxq->vport->vpi : 0,
9142 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04009143 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9144 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04009145 psli->sli_flag, flag);
9146 goto out_not_finished;
9147 }
James Smartda0436e2009-05-22 14:51:39 -04009148
9149 /* Put the mailbox command to the driver internal FIFO */
9150 psli->slistat.mbox_busy++;
9151 spin_lock_irqsave(&phba->hbalock, iflags);
9152 lpfc_mbox_put(phba, mboxq);
9153 spin_unlock_irqrestore(&phba->hbalock, iflags);
9154 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9155 "(%d):0354 Mbox cmd issue - Enqueue Data: "
James Smarta183a152011-10-10 21:32:43 -04009156 "x%x (x%x/x%x) x%x x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04009157 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9158 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
James Smarta183a152011-10-10 21:32:43 -04009159 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9160 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04009161 phba->pport->port_state,
9162 psli->sli_flag, MBX_NOWAIT);
9163 /* Wake up worker thread to transport mailbox command from head */
9164 lpfc_worker_wake_up(phba);
9165
9166 return MBX_BUSY;
9167
9168out_not_finished:
9169 return MBX_NOT_FINISHED;
9170}
9171
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 * Return codes:
 *	MBX_SUCCESS - mailbox posted (or nothing waiting in the queue)
 *	MBX_NOT_FINISHED - interrupts disabled, mailbox subsystem busy or
 *	blocked, device not ready, or the MQ post itself failed.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token (under hbalock):
	 * bail out if async posting is blocked, a command already owns
	 * the token, or a command is still marked active.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		/* Nothing queued: release the token before dropping the lock */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	/* Trace all commands except heartbeats to the discovery trace buffer */
	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	/* Fail the command back to its completion handler and release the
	 * service token, re-checking mbox_active under the lock since a
	 * cleanup path may have raced with us.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
9293
9294/**
9295 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9296 * @phba: Pointer to HBA context object.
9297 * @pmbox: Pointer to mailbox object.
9298 * @flag: Flag indicating how the mailbox need to be processed.
9299 *
9300 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
9301 * the API jump table function pointer from the lpfc_hba struct.
9302 *
9303 * Return codes the caller owns the mailbox command after the return of the
9304 * function.
9305 **/
9306int
9307lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9308{
9309 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9310}
9311
9312/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009313 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
James Smartda0436e2009-05-22 14:51:39 -04009314 * @phba: The hba struct for which this call is being executed.
9315 * @dev_grp: The HBA PCI-Device group number.
9316 *
9317 * This routine sets up the mbox interface API function jump table in @phba
9318 * struct.
9319 * Returns: 0 - success, -ENODEV - failure.
9320 **/
9321int
9322lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9323{
9324
9325 switch (dev_grp) {
9326 case LPFC_PCI_DEV_LP:
9327 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9328 phba->lpfc_sli_handle_slow_ring_event =
9329 lpfc_sli_handle_slow_ring_event_s3;
9330 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9331 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9332 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9333 break;
9334 case LPFC_PCI_DEV_OC:
9335 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9336 phba->lpfc_sli_handle_slow_ring_event =
9337 lpfc_sli_handle_slow_ring_event_s4;
9338 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9339 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9340 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9341 break;
9342 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07009343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009344 "1420 Invalid HBA PCI-device group: 0x%x\n",
9345 dev_grp);
9346 return -ENODEV;
James Smartda0436e2009-05-22 14:51:39 -04009347 }
9348 return 0;
9349}
9350
9351/**
James Smart3621a712009-04-06 18:47:14 -04009352 * __lpfc_sli_ringtx_put - Add an iocb to the txq
James Smarte59058c2008-08-24 21:49:00 -04009353 * @phba: Pointer to HBA context object.
9354 * @pring: Pointer to driver SLI ring object.
9355 * @piocb: Pointer to address of newly added command iocb.
9356 *
James Smart27f3efd2019-10-18 14:18:19 -07009357 * This function is called with hbalock held for SLI3 ports or
9358 * the ring lock held for SLI4 ports to add a command
James Smarte59058c2008-08-24 21:49:00 -04009359 * iocb to the txq when SLI layer cannot submit the command iocb
9360 * to the ring.
9361 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04009362void
James Smart92d7f7b2007-06-17 19:56:38 -05009363__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05009364 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05009365{
James Smart27f3efd2019-10-18 14:18:19 -07009366 if (phba->sli_rev == LPFC_SLI_REV4)
9367 lockdep_assert_held(&pring->ring_lock);
9368 else
9369 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05009370 /* Insert the caller's iocb in the txq tail for later processing. */
9371 list_add_tail(&piocb->list, &pring->txq);
dea31012005-04-17 16:05:31 -05009372}
9373
James Smarte59058c2008-08-24 21:49:00 -04009374/**
James Smart3621a712009-04-06 18:47:14 -04009375 * lpfc_sli_next_iocb - Get the next iocb in the txq
James Smarte59058c2008-08-24 21:49:00 -04009376 * @phba: Pointer to HBA context object.
9377 * @pring: Pointer to driver SLI ring object.
9378 * @piocb: Pointer to address of newly added command iocb.
9379 *
9380 * This function is called with hbalock held before a new
9381 * iocb is submitted to the firmware. This function checks
9382 * txq to flush the iocbs in txq to Firmware before
9383 * submitting new iocbs to the Firmware.
9384 * If there are iocbs in the txq which need to be submitted
9385 * to firmware, lpfc_sli_next_iocb returns the first element
9386 * of the txq after dequeuing it from txq.
9387 * If there is no iocb in the txq then the function will return
9388 * *piocb and *piocb is set to NULL. Caller needs to check
9389 * *piocb to find if there are more commands in the txq.
9390 **/
dea31012005-04-17 16:05:31 -05009391static struct lpfc_iocbq *
9392lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05009393 struct lpfc_iocbq **piocb)
dea31012005-04-17 16:05:31 -05009394{
9395 struct lpfc_iocbq * nextiocb;
9396
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01009397 lockdep_assert_held(&phba->hbalock);
9398
dea31012005-04-17 16:05:31 -05009399 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9400 if (!nextiocb) {
9401 nextiocb = *piocb;
9402 *piocb = NULL;
9403 }
9404
9405 return nextiocb;
9406}
9407
James Smarte59058c2008-08-24 21:49:00 -04009408/**
James Smart3772a992009-05-22 14:50:54 -04009409 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
James Smarte59058c2008-08-24 21:49:00 -04009410 * @phba: Pointer to HBA context object.
James Smart3772a992009-05-22 14:50:54 -04009411 * @ring_number: SLI ring number to issue iocb on.
James Smarte59058c2008-08-24 21:49:00 -04009412 * @piocb: Pointer to command iocb.
9413 * @flag: Flag indicating if this command can be put into txq.
9414 *
James Smart3772a992009-05-22 14:50:54 -04009415 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9416 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9417 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9418 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9419 * this function allows only iocbs for posting buffers. This function finds
9420 * next available slot in the command ring and posts the command to the
9421 * available slot and writes the port attention register to request HBA start
9422 * processing new iocb. If there is no slot available in the ring and
9423 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9424 * the function returns IOCB_BUSY.
James Smarte59058c2008-08-24 21:49:00 -04009425 *
James Smart3772a992009-05-22 14:50:54 -04009426 * This function is called with hbalock held. The function will return success
9427 * after it successfully submit the iocb to firmware or after adding to the
9428 * txq.
James Smarte59058c2008-08-24 21:49:00 -04009429 **/
James Smart98c9ea52007-10-27 13:37:33 -04009430static int
James Smart3772a992009-05-22 14:50:54 -04009431__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea31012005-04-17 16:05:31 -05009432 struct lpfc_iocbq *piocb, uint32_t flag)
9433{
9434 struct lpfc_iocbq *nextiocb;
9435 IOCB_t *iocb;
James Smart895427b2017-02-12 13:52:30 -08009436 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea31012005-04-17 16:05:31 -05009437
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01009438 lockdep_assert_held(&phba->hbalock);
9439
James Smart92d7f7b2007-06-17 19:56:38 -05009440 if (piocb->iocb_cmpl && (!piocb->vport) &&
9441 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9442 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009443 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -04009444 "1807 IOCB x%x failed. No vport\n",
James Smart92d7f7b2007-06-17 19:56:38 -05009445 piocb->iocb.ulpCommand);
9446 dump_stack();
9447 return IOCB_ERROR;
9448 }
9449
9450
Linas Vepstas8d63f372007-02-14 14:28:36 -06009451 /* If the PCI channel is in offline state, do not post iocbs. */
9452 if (unlikely(pci_channel_offline(phba->pcidev)))
9453 return IOCB_ERROR;
9454
James Smarta257bf92009-04-06 18:48:10 -04009455 /* If HBA has a deferred error attention, fail the iocb. */
9456 if (unlikely(phba->hba_flag & DEFER_ERATT))
9457 return IOCB_ERROR;
9458
dea31012005-04-17 16:05:31 -05009459 /*
9460 * We should never get an IOCB if we are in a < LINK_DOWN state
9461 */
James Smart2e0fef82007-06-17 19:56:36 -05009462 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea31012005-04-17 16:05:31 -05009463 return IOCB_ERROR;
9464
9465 /*
9466 * Check to see if we are blocking IOCB processing because of a
James Smart0b727fe2007-10-27 13:37:25 -04009467 * outstanding event.
dea31012005-04-17 16:05:31 -05009468 */
James Smart0b727fe2007-10-27 13:37:25 -04009469 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea31012005-04-17 16:05:31 -05009470 goto iocb_busy;
9471
James Smart2e0fef82007-06-17 19:56:36 -05009472 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea31012005-04-17 16:05:31 -05009473 /*
James Smart2680eea2007-04-25 09:52:55 -04009474 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea31012005-04-17 16:05:31 -05009475 * can be issued if the link is not up.
9476 */
9477 switch (piocb->iocb.ulpCommand) {
James Smart84774a42008-08-24 21:50:06 -04009478 case CMD_GEN_REQUEST64_CR:
9479 case CMD_GEN_REQUEST64_CX:
9480 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9481 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
James Smart6a9c52c2009-10-02 15:16:51 -04009482 FC_RCTL_DD_UNSOL_CMD) ||
James Smart84774a42008-08-24 21:50:06 -04009483 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9484 MENLO_TRANSPORT_TYPE))
9485
9486 goto iocb_busy;
9487 break;
dea31012005-04-17 16:05:31 -05009488 case CMD_QUE_RING_BUF_CN:
9489 case CMD_QUE_RING_BUF64_CN:
dea31012005-04-17 16:05:31 -05009490 /*
9491 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9492 * completion, iocb_cmpl MUST be 0.
9493 */
9494 if (piocb->iocb_cmpl)
9495 piocb->iocb_cmpl = NULL;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05009496 fallthrough;
dea31012005-04-17 16:05:31 -05009497 case CMD_CREATE_XRI_CR:
James Smart2680eea2007-04-25 09:52:55 -04009498 case CMD_CLOSE_XRI_CN:
9499 case CMD_CLOSE_XRI_CX:
dea31012005-04-17 16:05:31 -05009500 break;
9501 default:
9502 goto iocb_busy;
9503 }
9504
9505 /*
9506 * For FCP commands, we must be in a state where we can process link
9507 * attention events.
9508 */
James Smart895427b2017-02-12 13:52:30 -08009509 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
James Smart92d7f7b2007-06-17 19:56:38 -05009510 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea31012005-04-17 16:05:31 -05009511 goto iocb_busy;
James Smart92d7f7b2007-06-17 19:56:38 -05009512 }
dea31012005-04-17 16:05:31 -05009513
dea31012005-04-17 16:05:31 -05009514 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9515 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9516 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9517
9518 if (iocb)
9519 lpfc_sli_update_ring(phba, pring);
9520 else
9521 lpfc_sli_update_full_ring(phba, pring);
9522
9523 if (!piocb)
9524 return IOCB_SUCCESS;
9525
9526 goto out_busy;
9527
9528 iocb_busy:
9529 pring->stats.iocb_cmd_delay++;
9530
9531 out_busy:
9532
9533 if (!(flag & SLI_IOCB_RET_IOCB)) {
James Smart92d7f7b2007-06-17 19:56:38 -05009534 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea31012005-04-17 16:05:31 -05009535 return IOCB_SUCCESS;
9536 }
9537
9538 return IOCB_BUSY;
9539}
9540
James Smart3772a992009-05-22 14:50:54 -04009541/**
James Smart4f774512009-05-22 14:52:35 -04009542 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9543 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +01009544 * @piocbq: Pointer to command iocb.
James Smart4f774512009-05-22 14:52:35 -04009545 * @sglq: Pointer to the scatter gather queue object.
9546 *
9547 * This routine converts the bpl or bde that is in the IOCB
9548 * to a sgl list for the sli4 hardware. The physical address
9549 * of the bpl/bde is converted back to a virtual address.
9550 * If the IOCB contains a BPL then the list of BDE's is
9551 * converted to sli4_sge's. If the IOCB contains a single
9552 * BDE then it is converted to a single sli_sge.
9553 * The IOCB is still in cpu endianess so the contents of
9554 * the bpl can be used without byte swapping.
9555 *
9556 * Returns valid XRI = Success, NO_XRI = Failure.
9557**/
9558static uint16_t
9559lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9560 struct lpfc_sglq *sglq)
9561{
9562 uint16_t xritag = NO_XRI;
9563 struct ulp_bde64 *bpl = NULL;
9564 struct ulp_bde64 bde;
9565 struct sli4_sge *sgl = NULL;
James Smart1b511972011-12-13 13:23:09 -05009566 struct lpfc_dmabuf *dmabuf;
James Smart4f774512009-05-22 14:52:35 -04009567 IOCB_t *icmd;
9568 int numBdes = 0;
9569 int i = 0;
James Smart63e801c2010-11-20 23:14:19 -05009570 uint32_t offset = 0; /* accumulated offset in the sg request list */
9571 int inbound = 0; /* number of sg reply entries inbound from firmware */
James Smart4f774512009-05-22 14:52:35 -04009572
9573 if (!piocbq || !sglq)
9574 return xritag;
9575
9576 sgl = (struct sli4_sge *)sglq->sgl;
9577 icmd = &piocbq->iocb;
James Smart6b5151f2012-01-18 16:24:06 -05009578 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9579 return sglq->sli4_xritag;
James Smart4f774512009-05-22 14:52:35 -04009580 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9581 numBdes = icmd->un.genreq64.bdl.bdeSize /
9582 sizeof(struct ulp_bde64);
9583 /* The addrHigh and addrLow fields within the IOCB
9584 * have not been byteswapped yet so there is no
9585 * need to swap them back.
9586 */
James Smart1b511972011-12-13 13:23:09 -05009587 if (piocbq->context3)
9588 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9589 else
9590 return xritag;
James Smart4f774512009-05-22 14:52:35 -04009591
James Smart1b511972011-12-13 13:23:09 -05009592 bpl = (struct ulp_bde64 *)dmabuf->virt;
James Smart4f774512009-05-22 14:52:35 -04009593 if (!bpl)
9594 return xritag;
9595
9596 for (i = 0; i < numBdes; i++) {
9597 /* Should already be byte swapped. */
James Smart28baac72010-02-12 14:42:03 -05009598 sgl->addr_hi = bpl->addrHigh;
9599 sgl->addr_lo = bpl->addrLow;
9600
James Smart05580562011-05-24 11:40:48 -04009601 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04009602 if ((i+1) == numBdes)
9603 bf_set(lpfc_sli4_sge_last, sgl, 1);
9604 else
9605 bf_set(lpfc_sli4_sge_last, sgl, 0);
James Smart28baac72010-02-12 14:42:03 -05009606 /* swap the size field back to the cpu so we
9607 * can assign it to the sgl.
9608 */
9609 bde.tus.w = le32_to_cpu(bpl->tus.w);
9610 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
James Smart63e801c2010-11-20 23:14:19 -05009611 /* The offsets in the sgl need to be accumulated
9612 * separately for the request and reply lists.
9613 * The request is always first, the reply follows.
9614 */
9615 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9616 /* add up the reply sg entries */
9617 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9618 inbound++;
9619 /* first inbound? reset the offset */
9620 if (inbound == 1)
9621 offset = 0;
9622 bf_set(lpfc_sli4_sge_offset, sgl, offset);
James Smartf9bb2da2011-10-10 21:34:11 -04009623 bf_set(lpfc_sli4_sge_type, sgl,
9624 LPFC_SGE_TYPE_DATA);
James Smart63e801c2010-11-20 23:14:19 -05009625 offset += bde.tus.f.bdeSize;
9626 }
James Smart546fc852011-03-11 16:06:29 -05009627 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04009628 bpl++;
9629 sgl++;
9630 }
9631 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9632 /* The addrHigh and addrLow fields of the BDE have not
9633 * been byteswapped yet so they need to be swapped
9634 * before putting them in the sgl.
9635 */
9636 sgl->addr_hi =
9637 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9638 sgl->addr_lo =
9639 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
James Smart05580562011-05-24 11:40:48 -04009640 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04009641 bf_set(lpfc_sli4_sge_last, sgl, 1);
9642 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart28baac72010-02-12 14:42:03 -05009643 sgl->sge_len =
9644 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
James Smart4f774512009-05-22 14:52:35 -04009645 }
9646 return sglq->sli4_xritag;
9647}
9648
9649/**
Lee Jones8514e2f2021-03-03 14:46:18 +00009650 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
James Smart4f774512009-05-22 14:52:35 -04009651 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +01009652 * @iocbq: Pointer to command iocb.
James Smart4f774512009-05-22 14:52:35 -04009653 * @wqe: Pointer to the work queue entry.
9654 *
9655 * This routine converts the iocb command to its Work Queue Entry
9656 * equivalent. The wqe pointer should not have any fields set when
9657 * this routine is called because it will memcpy over them.
9658 * This routine does not set the CQ_ID or the WQEC bits in the
9659 * wqe.
9660 *
9661 * Returns: 0 = Success, IOCB_ERROR = Failure.
9662 **/
9663static int
9664lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
James Smart205e8242018-03-05 12:04:03 -08009665 union lpfc_wqe128 *wqe)
James Smart4f774512009-05-22 14:52:35 -04009666{
James Smart5ffc2662009-11-18 15:39:44 -05009667 uint32_t xmit_len = 0, total_len = 0;
James Smart4f774512009-05-22 14:52:35 -04009668 uint8_t ct = 0;
9669 uint32_t fip;
9670 uint32_t abort_tag;
9671 uint8_t command_type = ELS_COMMAND_NON_FIP;
9672 uint8_t cmnd;
9673 uint16_t xritag;
James Smartdcf2a4e2010-09-29 11:18:53 -04009674 uint16_t abrt_iotag;
9675 struct lpfc_iocbq *abrtiocbq;
James Smart4f774512009-05-22 14:52:35 -04009676 struct ulp_bde64 *bpl = NULL;
James Smartf0d9bcc2010-10-22 11:07:09 -04009677 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
James Smart5ffc2662009-11-18 15:39:44 -05009678 int numBdes, i;
9679 struct ulp_bde64 bde;
James Smartc31098c2011-04-16 11:03:33 -04009680 struct lpfc_nodelist *ndlp;
James Smartff78d8f2011-12-13 13:21:35 -05009681 uint32_t *pcmd;
James Smart1b511972011-12-13 13:23:09 -05009682 uint32_t if_type;
James Smart4f774512009-05-22 14:52:35 -04009683
James Smart45ed1192009-10-02 15:17:02 -04009684 fip = phba->hba_flag & HBA_FIP_SUPPORT;
James Smart4f774512009-05-22 14:52:35 -04009685 /* The fcp commands will set command type */
James Smart0c287582009-06-10 17:22:56 -04009686 if (iocbq->iocb_flag & LPFC_IO_FCP)
James Smart4f774512009-05-22 14:52:35 -04009687 command_type = FCP_COMMAND;
James Smartc8685952009-11-18 15:39:16 -05009688 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
James Smart0c287582009-06-10 17:22:56 -04009689 command_type = ELS_COMMAND_FIP;
9690 else
9691 command_type = ELS_COMMAND_NON_FIP;
9692
James Smartb5c53952016-03-31 14:12:30 -07009693 if (phba->fcp_embed_io)
9694 memset(wqe, 0, sizeof(union lpfc_wqe128));
James Smart4f774512009-05-22 14:52:35 -04009695 /* Some of the fields are in the right position already */
9696 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
James Smarte62245d2019-08-14 16:57:08 -07009697 /* The ct field has moved so reset */
9698 wqe->generic.wqe_com.word7 = 0;
9699 wqe->generic.wqe_com.word10 = 0;
James Smartb5c53952016-03-31 14:12:30 -07009700
9701 abort_tag = (uint32_t) iocbq->iotag;
9702 xritag = iocbq->sli4_xritag;
James Smart4f774512009-05-22 14:52:35 -04009703 /* words0-2 bpl convert bde */
9704 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
James Smart5ffc2662009-11-18 15:39:44 -05009705 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9706 sizeof(struct ulp_bde64);
James Smart4f774512009-05-22 14:52:35 -04009707 bpl = (struct ulp_bde64 *)
9708 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9709 if (!bpl)
9710 return IOCB_ERROR;
9711
9712 /* Should already be byte swapped. */
9713 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9714 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9715 /* swap the size field back to the cpu so we
9716 * can assign it to the sgl.
9717 */
9718 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
James Smart5ffc2662009-11-18 15:39:44 -05009719 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9720 total_len = 0;
9721 for (i = 0; i < numBdes; i++) {
9722 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9723 total_len += bde.tus.f.bdeSize;
9724 }
James Smart4f774512009-05-22 14:52:35 -04009725 } else
James Smart5ffc2662009-11-18 15:39:44 -05009726 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
James Smart4f774512009-05-22 14:52:35 -04009727
9728 iocbq->iocb.ulpIoTag = iocbq->iotag;
9729 cmnd = iocbq->iocb.ulpCommand;
9730
9731 switch (iocbq->iocb.ulpCommand) {
9732 case CMD_ELS_REQUEST64_CR:
James Smart93d1379e2012-05-09 21:19:34 -04009733 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9734 ndlp = iocbq->context_un.ndlp;
9735 else
9736 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04009737 if (!iocbq->iocb.ulpLe) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009738 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -04009739 "2007 Only Limited Edition cmd Format"
9740 " supported 0x%x\n",
9741 iocbq->iocb.ulpCommand);
9742 return IOCB_ERROR;
9743 }
James Smartff78d8f2011-12-13 13:21:35 -05009744
James Smart5ffc2662009-11-18 15:39:44 -05009745 wqe->els_req.payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04009746 /* Els_reguest64 has a TMO */
9747 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9748 iocbq->iocb.ulpTimeout);
9749 /* Need a VF for word 4 set the vf bit*/
9750 bf_set(els_req64_vf, &wqe->els_req, 0);
9751 /* And a VFID for word 12 */
9752 bf_set(els_req64_vfid, &wqe->els_req, 0);
James Smart4f774512009-05-22 14:52:35 -04009753 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
James Smartf0d9bcc2010-10-22 11:07:09 -04009754 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9755 iocbq->iocb.ulpContext);
9756 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9757 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
James Smart4f774512009-05-22 14:52:35 -04009758 /* CCP CCPE PV PRI in word10 were set in the memcpy */
James Smartff78d8f2011-12-13 13:21:35 -05009759 if (command_type == ELS_COMMAND_FIP)
James Smartc8685952009-11-18 15:39:16 -05009760 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9761 >> LPFC_FIP_ELS_ID_SHIFT);
James Smartff78d8f2011-12-13 13:21:35 -05009762 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9763 iocbq->context2)->virt);
James Smart1b511972011-12-13 13:23:09 -05009764 if_type = bf_get(lpfc_sli_intf_if_type,
9765 &phba->sli4_hba.sli_intf);
James Smart27d6ac02018-02-22 08:18:42 -08009766 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
James Smartff78d8f2011-12-13 13:21:35 -05009767 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
James Smartcb69f7d2011-12-13 13:21:57 -05009768 *pcmd == ELS_CMD_SCR ||
James Smartdf3fe762020-02-10 09:31:55 -08009769 *pcmd == ELS_CMD_RDF ||
James Smartf60cb932019-05-14 14:58:05 -07009770 *pcmd == ELS_CMD_RSCN_XMT ||
James Smart6b5151f2012-01-18 16:24:06 -05009771 *pcmd == ELS_CMD_FDISC ||
James Smartbdcd2b92012-03-01 22:33:52 -05009772 *pcmd == ELS_CMD_LOGO ||
Gaurav Srivastavaf56e86a2021-06-08 10:05:53 +05309773 *pcmd == ELS_CMD_QFPA ||
9774 *pcmd == ELS_CMD_UVEM ||
James Smartff78d8f2011-12-13 13:21:35 -05009775 *pcmd == ELS_CMD_PLOGI)) {
9776 bf_set(els_req64_sp, &wqe->els_req, 1);
9777 bf_set(els_req64_sid, &wqe->els_req,
9778 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -04009779 if ((*pcmd == ELS_CMD_FLOGI) &&
9780 !(phba->fc_topology ==
9781 LPFC_TOPOLOGY_LOOP))
9782 bf_set(els_req64_sid, &wqe->els_req, 0);
James Smartff78d8f2011-12-13 13:21:35 -05009783 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9784 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
James Smarta7dd9c02012-05-09 21:16:50 -04009785 phba->vpi_ids[iocbq->vport->vpi]);
James Smart3ef6d242012-01-18 16:23:48 -05009786 } else if (pcmd && iocbq->context1) {
James Smartff78d8f2011-12-13 13:21:35 -05009787 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9788 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9789 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9790 }
James Smartc8685952009-11-18 15:39:16 -05009791 }
James Smart6d368e52011-05-24 11:44:12 -04009792 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9793 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04009794 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9795 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9796 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9797 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9798 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9799 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -04009800 wqe->els_req.max_response_payload_len = total_len - xmit_len;
James Smart7851fe22011-07-22 18:36:52 -04009801 break;
James Smart5ffc2662009-11-18 15:39:44 -05009802 case CMD_XMIT_SEQUENCE64_CX:
James Smartf0d9bcc2010-10-22 11:07:09 -04009803 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9804 iocbq->iocb.un.ulpWord[3]);
9805 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04009806 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart5ffc2662009-11-18 15:39:44 -05009807 /* The entire sequence is transmitted for this IOCB */
9808 xmit_len = total_len;
9809 cmnd = CMD_XMIT_SEQUENCE64_CR;
James Smart1b511972011-12-13 13:23:09 -05009810 if (phba->link_flag & LS_LOOPBACK_MODE)
9811 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05009812 fallthrough;
James Smart4f774512009-05-22 14:52:35 -04009813 case CMD_XMIT_SEQUENCE64_CR:
James Smartf0d9bcc2010-10-22 11:07:09 -04009814 /* word3 iocb=io_tag32 wqe=reserved */
9815 wqe->xmit_sequence.rsvd3 = 0;
James Smart4f774512009-05-22 14:52:35 -04009816 /* word4 relative_offset memcpy */
9817 /* word5 r_ctl/df_ctl memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04009818 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9819 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9820 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9821 LPFC_WQE_IOD_WRITE);
9822 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9823 LPFC_WQE_LENLOC_WORD12);
9824 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
James Smart5ffc2662009-11-18 15:39:44 -05009825 wqe->xmit_sequence.xmit_len = xmit_len;
9826 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04009827 break;
James Smart4f774512009-05-22 14:52:35 -04009828 case CMD_XMIT_BCAST64_CN:
James Smartf0d9bcc2010-10-22 11:07:09 -04009829 /* word3 iocb=iotag32 wqe=seq_payload_len */
9830 wqe->xmit_bcast64.seq_payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04009831 /* word4 iocb=rsvd wqe=rsvd */
9832 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9833 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
James Smartf0d9bcc2010-10-22 11:07:09 -04009834 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04009835 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
James Smartf0d9bcc2010-10-22 11:07:09 -04009836 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9837 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9838 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9839 LPFC_WQE_LENLOC_WORD3);
9840 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
James Smart7851fe22011-07-22 18:36:52 -04009841 break;
James Smart4f774512009-05-22 14:52:35 -04009842 case CMD_FCP_IWRITE64_CR:
9843 command_type = FCP_COMMAND_DATA_OUT;
James Smartf0d9bcc2010-10-22 11:07:09 -04009844 /* word3 iocb=iotag wqe=payload_offset_len */
9845 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04009846 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9847 xmit_len + sizeof(struct fcp_rsp));
9848 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9849 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04009850 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9851 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9852 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9853 iocbq->iocb.ulpFCP2Rcvy);
9854 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9855 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04009856 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9857 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9858 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04009859 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05009860 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05009861 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9862 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07009863 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9864 if (iocbq->priority) {
9865 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9866 (iocbq->priority << 1));
9867 } else {
James Smart1ba981f2014-02-20 09:56:45 -05009868 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9869 (phba->cfg_XLanePriority << 1));
9870 }
9871 }
James Smartb5c53952016-03-31 14:12:30 -07009872 /* Note, word 10 is already initialized to 0 */
9873
James Smart414abe02018-06-26 08:24:26 -07009874 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9875 if (phba->cfg_enable_pbde)
James Smart0bc2b7c2018-02-22 08:18:48 -08009876 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9877 else
9878 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9879
James Smartb5c53952016-03-31 14:12:30 -07009880 if (phba->fcp_embed_io) {
James Smartc4908502019-01-28 11:14:28 -08009881 struct lpfc_io_buf *lpfc_cmd;
James Smartb5c53952016-03-31 14:12:30 -07009882 struct sli4_sge *sgl;
James Smartb5c53952016-03-31 14:12:30 -07009883 struct fcp_cmnd *fcp_cmnd;
9884 uint32_t *ptr;
9885
9886 /* 128 byte wqe support here */
James Smartb5c53952016-03-31 14:12:30 -07009887
9888 lpfc_cmd = iocbq->context1;
James Smart0794d602019-01-28 11:14:19 -08009889 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
James Smartb5c53952016-03-31 14:12:30 -07009890 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9891
9892 /* Word 0-2 - FCP_CMND */
James Smart205e8242018-03-05 12:04:03 -08009893 wqe->generic.bde.tus.f.bdeFlags =
James Smartb5c53952016-03-31 14:12:30 -07009894 BUFF_TYPE_BDE_IMMED;
James Smart205e8242018-03-05 12:04:03 -08009895 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9896 wqe->generic.bde.addrHigh = 0;
9897 wqe->generic.bde.addrLow = 88; /* Word 22 */
James Smartb5c53952016-03-31 14:12:30 -07009898
James Smart205e8242018-03-05 12:04:03 -08009899 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9900 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
James Smartb5c53952016-03-31 14:12:30 -07009901
9902 /* Word 22-29 FCP CMND Payload */
James Smart205e8242018-03-05 12:04:03 -08009903 ptr = &wqe->words[22];
James Smartb5c53952016-03-31 14:12:30 -07009904 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9905 }
James Smart7851fe22011-07-22 18:36:52 -04009906 break;
James Smartf0d9bcc2010-10-22 11:07:09 -04009907 case CMD_FCP_IREAD64_CR:
9908 /* word3 iocb=iotag wqe=payload_offset_len */
9909 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04009910 bf_set(payload_offset_len, &wqe->fcp_iread,
9911 xmit_len + sizeof(struct fcp_rsp));
9912 bf_set(cmd_buff_len, &wqe->fcp_iread,
9913 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04009914 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9915 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9916 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9917 iocbq->iocb.ulpFCP2Rcvy);
9918 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
James Smart4f774512009-05-22 14:52:35 -04009919 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04009920 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9921 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9922 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04009923 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05009924 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05009925 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9926 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07009927 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9928 if (iocbq->priority) {
9929 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9930 (iocbq->priority << 1));
9931 } else {
James Smart1ba981f2014-02-20 09:56:45 -05009932 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9933 (phba->cfg_XLanePriority << 1));
9934 }
9935 }
James Smartb5c53952016-03-31 14:12:30 -07009936 /* Note, word 10 is already initialized to 0 */
9937
James Smart414abe02018-06-26 08:24:26 -07009938 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9939 if (phba->cfg_enable_pbde)
James Smart0bc2b7c2018-02-22 08:18:48 -08009940 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9941 else
9942 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9943
James Smartb5c53952016-03-31 14:12:30 -07009944 if (phba->fcp_embed_io) {
James Smartc4908502019-01-28 11:14:28 -08009945 struct lpfc_io_buf *lpfc_cmd;
James Smartb5c53952016-03-31 14:12:30 -07009946 struct sli4_sge *sgl;
James Smartb5c53952016-03-31 14:12:30 -07009947 struct fcp_cmnd *fcp_cmnd;
9948 uint32_t *ptr;
9949
9950 /* 128 byte wqe support here */
James Smartb5c53952016-03-31 14:12:30 -07009951
9952 lpfc_cmd = iocbq->context1;
James Smart0794d602019-01-28 11:14:19 -08009953 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
James Smartb5c53952016-03-31 14:12:30 -07009954 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9955
9956 /* Word 0-2 - FCP_CMND */
James Smart205e8242018-03-05 12:04:03 -08009957 wqe->generic.bde.tus.f.bdeFlags =
James Smartb5c53952016-03-31 14:12:30 -07009958 BUFF_TYPE_BDE_IMMED;
James Smart205e8242018-03-05 12:04:03 -08009959 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9960 wqe->generic.bde.addrHigh = 0;
9961 wqe->generic.bde.addrLow = 88; /* Word 22 */
James Smartb5c53952016-03-31 14:12:30 -07009962
James Smart205e8242018-03-05 12:04:03 -08009963 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9964 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
James Smartb5c53952016-03-31 14:12:30 -07009965
9966 /* Word 22-29 FCP CMND Payload */
James Smart205e8242018-03-05 12:04:03 -08009967 ptr = &wqe->words[22];
James Smartb5c53952016-03-31 14:12:30 -07009968 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9969 }
James Smart7851fe22011-07-22 18:36:52 -04009970 break;
James Smartf1126682009-06-10 17:22:44 -04009971 case CMD_FCP_ICMND64_CR:
James Smart0ba4b212013-10-10 12:22:38 -04009972 /* word3 iocb=iotag wqe=payload_offset_len */
9973 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9974 bf_set(payload_offset_len, &wqe->fcp_icmd,
9975 xmit_len + sizeof(struct fcp_rsp));
9976 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9977 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04009978 /* word3 iocb=IO_TAG wqe=reserved */
James Smartf0d9bcc2010-10-22 11:07:09 -04009979 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
James Smartf1126682009-06-10 17:22:44 -04009980 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04009981 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9982 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9983 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9984 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9985 LPFC_WQE_LENLOC_NONE);
James Smart2a94aea2012-09-29 11:30:31 -04009986 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9987 iocbq->iocb.ulpFCP2Rcvy);
James Smart1ba981f2014-02-20 09:56:45 -05009988 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9989 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07009990 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9991 if (iocbq->priority) {
9992 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9993 (iocbq->priority << 1));
9994 } else {
James Smart1ba981f2014-02-20 09:56:45 -05009995 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9996 (phba->cfg_XLanePriority << 1));
9997 }
9998 }
James Smartb5c53952016-03-31 14:12:30 -07009999 /* Note, word 10 is already initialized to 0 */
10000
10001 if (phba->fcp_embed_io) {
James Smartc4908502019-01-28 11:14:28 -080010002 struct lpfc_io_buf *lpfc_cmd;
James Smartb5c53952016-03-31 14:12:30 -070010003 struct sli4_sge *sgl;
James Smartb5c53952016-03-31 14:12:30 -070010004 struct fcp_cmnd *fcp_cmnd;
10005 uint32_t *ptr;
10006
10007 /* 128 byte wqe support here */
James Smartb5c53952016-03-31 14:12:30 -070010008
10009 lpfc_cmd = iocbq->context1;
James Smart0794d602019-01-28 11:14:19 -080010010 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
James Smartb5c53952016-03-31 14:12:30 -070010011 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10012
10013 /* Word 0-2 - FCP_CMND */
James Smart205e8242018-03-05 12:04:03 -080010014 wqe->generic.bde.tus.f.bdeFlags =
James Smartb5c53952016-03-31 14:12:30 -070010015 BUFF_TYPE_BDE_IMMED;
James Smart205e8242018-03-05 12:04:03 -080010016 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10017 wqe->generic.bde.addrHigh = 0;
10018 wqe->generic.bde.addrLow = 88; /* Word 22 */
James Smartb5c53952016-03-31 14:12:30 -070010019
James Smart205e8242018-03-05 12:04:03 -080010020 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10021 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
James Smartb5c53952016-03-31 14:12:30 -070010022
10023 /* Word 22-29 FCP CMND Payload */
James Smart205e8242018-03-05 12:04:03 -080010024 ptr = &wqe->words[22];
James Smartb5c53952016-03-31 14:12:30 -070010025 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10026 }
James Smart7851fe22011-07-22 18:36:52 -040010027 break;
James Smart4f774512009-05-22 14:52:35 -040010028 case CMD_GEN_REQUEST64_CR:
James Smart63e801c2010-11-20 23:14:19 -050010029 /* For this command calculate the xmit length of the
10030 * request bde.
10031 */
10032 xmit_len = 0;
10033 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10034 sizeof(struct ulp_bde64);
10035 for (i = 0; i < numBdes; i++) {
James Smart63e801c2010-11-20 23:14:19 -050010036 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
James Smart546fc852011-03-11 16:06:29 -050010037 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10038 break;
James Smart63e801c2010-11-20 23:14:19 -050010039 xmit_len += bde.tus.f.bdeSize;
10040 }
James Smartf0d9bcc2010-10-22 11:07:09 -040010041 /* word3 iocb=IO_TAG wqe=request_payload_len */
10042 wqe->gen_req.request_payload_len = xmit_len;
10043 /* word4 iocb=parameter wqe=relative_offset memcpy */
10044 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
James Smart4f774512009-05-22 14:52:35 -040010045 /* word6 context tag copied in memcpy */
10046 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
10047 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
Dick Kennedy372c1872020-06-30 14:50:00 -070010048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040010049 "2015 Invalid CT %x command 0x%x\n",
10050 ct, iocbq->iocb.ulpCommand);
10051 return IOCB_ERROR;
10052 }
James Smartf0d9bcc2010-10-22 11:07:09 -040010053 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10054 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10055 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10056 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10057 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10058 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10059 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10060 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -040010061 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
James Smart4f774512009-05-22 14:52:35 -040010062 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -040010063 break;
James Smart4f774512009-05-22 14:52:35 -040010064 case CMD_XMIT_ELS_RSP64_CX:
James Smartc31098c2011-04-16 11:03:33 -040010065 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -040010066 /* words0-2 BDE memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -040010067 /* word3 iocb=iotag32 wqe=response_payload_len */
10068 wqe->xmit_els_rsp.response_payload_len = xmit_len;
James Smart939723a2012-05-09 21:19:03 -040010069 /* word4 */
10070 wqe->xmit_els_rsp.word4 = 0;
James Smart4f774512009-05-22 14:52:35 -040010071 /* word5 iocb=rsvd wge=did */
10072 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
James Smart939723a2012-05-09 21:19:03 -040010073 iocbq->iocb.un.xseq64.xmit_els_remoteID);
10074
10075 if_type = bf_get(lpfc_sli_intf_if_type,
10076 &phba->sli4_hba.sli_intf);
James Smart27d6ac02018-02-22 08:18:42 -080010077 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
James Smart939723a2012-05-09 21:19:03 -040010078 if (iocbq->vport->fc_flag & FC_PT2PT) {
10079 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10080 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10081 iocbq->vport->fc_myDID);
10082 if (iocbq->vport->fc_myDID == Fabric_DID) {
10083 bf_set(wqe_els_did,
10084 &wqe->xmit_els_rsp.wqe_dest, 0);
10085 }
10086 }
10087 }
James Smartf0d9bcc2010-10-22 11:07:09 -040010088 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10089 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10090 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10091 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -040010092 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart4f774512009-05-22 14:52:35 -040010093 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
James Smartf0d9bcc2010-10-22 11:07:09 -040010094 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smart6d368e52011-05-24 11:44:12 -040010095 phba->vpi_ids[iocbq->vport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -040010096 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10097 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10098 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10099 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10100 LPFC_WQE_LENLOC_WORD3);
10101 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
James Smart6d368e52011-05-24 11:44:12 -040010102 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10103 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartff78d8f2011-12-13 13:21:35 -050010104 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10105 iocbq->context2)->virt);
10106 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
James Smart939723a2012-05-09 21:19:03 -040010107 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10108 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
James Smartff78d8f2011-12-13 13:21:35 -050010109 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -040010110 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10111 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smartff78d8f2011-12-13 13:21:35 -050010112 phba->vpi_ids[phba->pport->vpi]);
10113 }
James Smart4f774512009-05-22 14:52:35 -040010114 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -040010115 break;
James Smart4f774512009-05-22 14:52:35 -040010116 case CMD_CLOSE_XRI_CN:
10117 case CMD_ABORT_XRI_CN:
10118 case CMD_ABORT_XRI_CX:
10119 /* words 0-2 memcpy should be 0 rserved */
10120 /* port will send abts */
James Smartdcf2a4e2010-09-29 11:18:53 -040010121 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10122 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10123 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10124 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
10125 } else
10126 fip = 0;
10127
10128 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
James Smart4f774512009-05-22 14:52:35 -040010129 /*
James Smartdcf2a4e2010-09-29 11:18:53 -040010130 * The link is down, or the command was ELS_FIP
10131 * so the fw does not need to send abts
James Smart4f774512009-05-22 14:52:35 -040010132 * on the wire.
10133 */
10134 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10135 else
10136 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10137 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
James Smartf0d9bcc2010-10-22 11:07:09 -040010138 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10139 wqe->abort_cmd.rsrvd5 = 0;
10140 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
James Smart4f774512009-05-22 14:52:35 -040010141 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10142 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
James Smart4f774512009-05-22 14:52:35 -040010143 /*
10144 * The abort handler will send us CMD_ABORT_XRI_CN or
10145 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10146 */
James Smartf0d9bcc2010-10-22 11:07:09 -040010147 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10148 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10149 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10150 LPFC_WQE_LENLOC_NONE);
James Smart4f774512009-05-22 14:52:35 -040010151 cmnd = CMD_ABORT_XRI_CX;
10152 command_type = OTHER_COMMAND;
10153 xritag = 0;
James Smart7851fe22011-07-22 18:36:52 -040010154 break;
James Smart6669f9b2009-10-02 15:16:45 -040010155 case CMD_XMIT_BLS_RSP64_CX:
James Smart6b5151f2012-01-18 16:24:06 -050010156 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart546fc852011-03-11 16:06:29 -050010157 /* As BLS ABTS RSP WQE is very different from other WQEs,
James Smart6669f9b2009-10-02 15:16:45 -040010158 * we re-construct this WQE here based on information in
10159 * iocbq from scratch.
10160 */
James Smartd9f492a2019-08-14 16:57:04 -070010161 memset(wqe, 0, sizeof(*wqe));
James Smart5ffc2662009-11-18 15:39:44 -050010162 /* OX_ID is invariable to who sent ABTS to CT exchange */
James Smart6669f9b2009-10-02 15:16:45 -040010163 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -050010164 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10165 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
James Smart5ffc2662009-11-18 15:39:44 -050010166 LPFC_ABTS_UNSOL_INT) {
10167 /* ABTS sent by initiator to CT exchange, the
10168 * RX_ID field will be filled with the newly
10169 * allocated responder XRI.
10170 */
10171 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10172 iocbq->sli4_xritag);
10173 } else {
10174 /* ABTS sent by responder to CT exchange, the
10175 * RX_ID field will be filled with the responder
10176 * RX_ID from ABTS.
10177 */
10178 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -050010179 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
James Smart5ffc2662009-11-18 15:39:44 -050010180 }
James Smart6669f9b2009-10-02 15:16:45 -040010181 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10182 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
James Smart6b5151f2012-01-18 16:24:06 -050010183
10184 /* Use CT=VPI */
10185 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10186 ndlp->nlp_DID);
10187 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10188 iocbq->iocb.ulpContext);
10189 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
James Smart6669f9b2009-10-02 15:16:45 -040010190 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
James Smart6b5151f2012-01-18 16:24:06 -050010191 phba->vpi_ids[phba->pport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -040010192 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10193 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10194 LPFC_WQE_LENLOC_NONE);
James Smart6669f9b2009-10-02 15:16:45 -040010195 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
10196 command_type = OTHER_COMMAND;
James Smart546fc852011-03-11 16:06:29 -050010197 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10198 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10199 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10200 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10201 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10202 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10203 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10204 }
10205
James Smart7851fe22011-07-22 18:36:52 -040010206 break;
James Smartae9e28f2017-05-15 15:20:51 -070010207 case CMD_SEND_FRAME:
James Smarte62245d2019-08-14 16:57:08 -070010208 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10209 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10210 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10211 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10212 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10213 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10214 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10215 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10216 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
James Smartae9e28f2017-05-15 15:20:51 -070010217 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10218 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10219 return 0;
James Smart4f774512009-05-22 14:52:35 -040010220 case CMD_XRI_ABORTED_CX:
10221 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
James Smart4f774512009-05-22 14:52:35 -040010222 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10223 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10224 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10225 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10226 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070010227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040010228 "2014 Invalid command 0x%x\n",
10229 iocbq->iocb.ulpCommand);
10230 return IOCB_ERROR;
James Smart4f774512009-05-22 14:52:35 -040010231 }
James Smart6d368e52011-05-24 11:44:12 -040010232
James Smart8012cc32012-10-31 14:44:49 -040010233 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10234 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10235 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10236 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10237 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10238 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10239 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10240 LPFC_IO_DIF_INSERT);
James Smartf0d9bcc2010-10-22 11:07:09 -040010241 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10242 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10243 wqe->generic.wqe_com.abort_tag = abort_tag;
10244 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10245 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10246 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10247 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
James Smart4f774512009-05-22 14:52:35 -040010248 return 0;
10249}
10250
/**
 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the
 * lockless __lpfc_sli_issue_iocb_s3() to send an FCP iocb command to an
 * HBA with SLI-3 interface spec.
 *
 * This function takes the hbalock before invoking the lockless version.
 * The function will return success after it successfully submits the iocb
 * to firmware or after adding it to the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	/* Serialize against other SLI-3 submitters; the s3 issue path is
	 * lockless and relies on the caller holding hbalock.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}
10278
/**
 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to
 * issue a wqe command to an HBA with SLI-4 interface spec.
 *
 * This function is a lockless version. The function will return success
 * after it successfully submits the wqe to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	int rc;
	/* The iocb's context1 carries the driver IO buffer for this command */
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *)piocb->context1;
	union lpfc_wqe128 *wqe = &piocb->wqe;
	struct sli4_sge *sgl;

	/* 128 byte wqe support here */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (phba->fcp_embed_io) {
		/* Embed the FCP_CMND payload directly in the WQE instead of
		 * pointing at it with a BDE.
		 */
		struct fcp_cmnd *fcp_cmnd;
		u32 *ptr;

		fcp_cmnd = lpfc_cmd->fcp_cmnd;

		/* Word 0-2 - FCP_CMND described as immediate data whose
		 * payload starts at WQE word 22 (byte offset 88).
		 */
		wqe->generic.bde.tus.f.bdeFlags =
			BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 88;	/* Word 22 */

		/* wqes=1 marks embedded payload; dbde=0 since no data BDE */
		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

		/* Word 22-29  FCP CMND Payload */
		ptr = &wqe->words[22];
		memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
	} else {
		/* Word 0-2 - Inline BDE pointing at the FCP_CMND in the
		 * command's first SGE.
		 */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	/* add the VMID tags as per switch response */
	if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_priority_tagging) {
			/* Priority tagging: VMID carried in the CS_CTL field
			 * via the ccp/ccpe WQE bits.
			 */
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       (piocb->vmid_tag.cs_ctl_vmid));
		} else {
			/* App-ID tagging: app_id placed in WQE word 31.
			 * NOTE(review): wqe_wqes is set here even on the
			 * non-embedded path above — presumably required so
			 * the adapter consumes word 31; confirm against the
			 * SLI-4 WQE spec.
			 */
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = piocb->vmid_tag.app_id;
		}
	}
	/* Post the WQE on this IO's assigned hardware queue */
	rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
	return rc;
}
10352
10353/**
James Smart4f774512009-05-22 14:52:35 -040010354 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10355 * @phba: Pointer to HBA context object.
10356 * @ring_number: SLI ring number to issue iocb on.
10357 * @piocb: Pointer to command iocb.
10358 * @flag: Flag indicating if this command can be put into txq.
10359 *
10360 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10361 * an iocb command to an HBA with SLI-4 interface spec.
10362 *
James Smart27f3efd2019-10-18 14:18:19 -070010363 * This function is called with ringlock held. The function will return success
James Smart4f774512009-05-22 14:52:35 -040010364 * after it successfully submit the iocb to firmware or after adding to the
10365 * txq.
10366 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ: FCP-class commands go to the per-hdwq IO WQ selected
	 * by hba_wqidx; everything else is submitted on the ELS WQ.
	 */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes, depending on how the
	 * queue was created (union lpfc_wqe128 covers the larger case).
	 */

	/* Caller must already hold this ring's lock (lockless variant). */
	lockdep_assert_held(&pring->ring_lock);

	if (piocb->sli4_xritag == NO_XRI) {
		/* Abort/close requests reference an existing exchange and
		 * never need an sglq of their own.
		 */
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			/* Preserve ordering: if the txq already has waiters,
			 * queue behind them rather than jumping ahead.
			 */
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					/* No free sglq: defer to txq or
					 * report busy, per caller's flag.
					 */
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	}
	else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		/* Convert the BPL in the iocb to an SGL for the WQE. */
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	/* Translate the IOCB into a WQE and post it to the work queue. */
	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	/* Track the outstanding command until its completion arrives. */
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
10449
/*
 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
 *
 * This routine wraps the actual FCP I/O function pointer from the
 * lpfc_hba struct: the bound handler issues a WQE for SLI-4 or an
 * IOCB for SLI-3 (see lpfc_sli_api_table_setup).
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
}
10468
/*
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing IOCB function
 * pointer from the lpfc_hba struct (bound per SLI revision by
 * lpfc_sli_api_table_setup).
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
10486
10487/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010488 * lpfc_sli_api_table_setup - Set up sli api function jump table
James Smart3772a992009-05-22 14:50:54 -040010489 * @phba: The hba struct for which this call is being executed.
10490 * @dev_grp: The HBA PCI-Device group number.
10491 *
10492 * This routine sets up the SLI interface API function jump table in @phba
10493 * struct.
10494 * Returns: 0 - success, -ENODEV - failure.
10495 **/
10496int
10497lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10498{
10499
10500 switch (dev_grp) {
10501 case LPFC_PCI_DEV_LP:
10502 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10503 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
James Smart47ff4c52020-11-15 11:26:41 -080010504 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
James Smart3772a992009-05-22 14:50:54 -040010505 break;
James Smart4f774512009-05-22 14:52:35 -040010506 case LPFC_PCI_DEV_OC:
10507 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10508 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
James Smart47ff4c52020-11-15 11:26:41 -080010509 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
James Smart4f774512009-05-22 14:52:35 -040010510 break;
James Smart3772a992009-05-22 14:50:54 -040010511 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070010512 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040010513 "1419 Invalid HBA PCI-device group: 0x%x\n",
10514 dev_grp);
10515 return -ENODEV;
James Smart3772a992009-05-22 14:50:54 -040010516 }
10517 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10518 return 0;
10519}
James Smart92d7f7b2007-06-17 19:56:38 -050010520
/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ of the command they are
 * aborting, we use command's hba_wqidx.
 *
 * Returns the selected WQ's ring, or NULL if the corresponding work
 * queue(s) have not been set up.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_io_buf *lpfc_cmd;

	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (unlikely(!phba->sli4_hba.hdwq))
			return NULL;
		/*
		 * for abort iocb hba_wqidx should already
		 * be setup based on what work queue we used.
		 */
		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
			lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
		}
		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		/* Non-FCP traffic always uses the single ELS WQ. */
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}
10555
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
 * function. This function gets the hbalock and calls
 * __lpfc_sli_issue_iocb function and will return the error returned
 * by __lpfc_sli_issue_iocb function. This wrapper is used by
 * functions which do not hold hbalock.
 *
 * On SLI-4 the per-ring lock is taken instead of the global hbalock,
 * and the matching EQ is polled after submission.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *eq;
	unsigned long iflags;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;

		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		/* SLI-4: serialize on the selected ring only. */
		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
10598
James Smarte59058c2008-08-24 21:49:00 -040010599/**
James Smart3621a712009-04-06 18:47:14 -040010600 * lpfc_extra_ring_setup - Extra ring setup function
James Smarte59058c2008-08-24 21:49:00 -040010601 * @phba: Pointer to HBA context object.
10602 *
10603 * This function is called while driver attaches with the
10604 * HBA to setup the extra ring. The extra ring is used
10605 * only when driver needs to support target mode functionality
10606 * or IP over FC functionalities.
10607 *
James Smart895427b2017-02-12 13:52:30 -080010608 * This function is called with no lock held. SLI3 only.
James Smarte59058c2008-08-24 21:49:00 -040010609 **/
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010610static int
10611lpfc_extra_ring_setup( struct lpfc_hba *phba)
10612{
10613 struct lpfc_sli *psli;
10614 struct lpfc_sli_ring *pring;
10615
10616 psli = &phba->sli;
10617
10618 /* Adjust cmd/rsp ring iocb entries more evenly */
James Smarta4bc3372006-12-02 13:34:16 -050010619
10620 /* Take some away from the FCP ring */
James Smart895427b2017-02-12 13:52:30 -080010621 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smart7e56aa22012-08-03 12:35:34 -040010622 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10623 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10624 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10625 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010626
James Smarta4bc3372006-12-02 13:34:16 -050010627 /* and give them to the extra ring */
James Smart895427b2017-02-12 13:52:30 -080010628 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
James Smarta4bc3372006-12-02 13:34:16 -050010629
James Smart7e56aa22012-08-03 12:35:34 -040010630 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10631 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10632 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10633 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010634
10635 /* Setup default profile for this ring */
10636 pring->iotag_max = 4096;
10637 pring->num_mask = 1;
10638 pring->prt[0].profile = 0; /* Mask 0 */
James Smarta4bc3372006-12-02 13:34:16 -050010639 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10640 pring->prt[0].type = phba->cfg_multi_ring_type;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -050010641 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10642 return 0;
10643}
10644
/**
 * lpfc_sli_post_recovery_event - Queue a port recovery event to the worker
 * @phba: Pointer to HBA context object.
 * @ndlp: Pointer to the node needing recovery.
 *
 * Queues an LPFC_EVT_RECOVER_PORT work event for @ndlp on the HBA's work
 * list and wakes the worker thread. If an event for this node is already
 * queued (its evt_listp is non-empty), nothing is done. A node reference
 * is taken via lpfc_nlp_get() and held until the queued work completes;
 * if the reference cannot be taken the event is not queued.
 **/
static void
lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
			     struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;
	struct lpfc_work_evt *evtp = &ndlp->recovery_evt;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!list_empty(&evtp->evt_listp)) {
		/* Already queued; avoid double-adding the same event. */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return;
	}

	/* Incrementing the reference count until the queued work is done. */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (!evtp->evt_arg1) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return;
	}
	evtp->evt = LPFC_EVT_RECOVER_PORT;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	lpfc_worker_wake_up(phba);
}
10670
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	/* Resolve vport and node; without both there is nothing to recover. */
	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp)
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	/* NOTE(review): the argument order below does not match the format
	 * string's labels -- vpi/rpi are passed in the "status"/"reason"
	 * positions while ulpContext/ulpStatus land under "vpi"/"rpi".
	 * Confirm the intended ordering before relying on this log output.
	 */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
			vpi, rpi);
}
10718
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort could be originated
 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
 * When the failure matches the known ABTS protocol-failure signatures, a
 * port recovery event is queued for the node.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	uint32_t ext_status = 0;

	if (!ndlp) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case. Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_post_recovery_event(phba, ndlp);
}
10761
/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 * ASYNC_STATUS_CN events are forwarded to the ABTS error handler;
 * anything else is dumped word-by-word as an unexpected event.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		/* ulpContext carries the temperature in Celsius. */
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		/* Unknown event: log the raw 16-word iocb for diagnosis. */
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}
10834
10835
James Smarte59058c2008-08-24 21:49:00 -040010836/**
James Smart895427b2017-02-12 13:52:30 -080010837 * lpfc_sli4_setup - SLI ring setup function
James Smarte59058c2008-08-24 21:49:00 -040010838 * @phba: Pointer to HBA context object.
10839 *
10840 * lpfc_sli_setup sets up rings of the SLI interface with
10841 * number of iocbs per ring and iotags. This function is
10842 * called while driver attach to the HBA and before the
10843 * interrupts are enabled. So there is no need for locking.
10844 *
10845 * This function always returns 0.
10846 **/
dea31012005-04-17 16:05:31 -050010847int
James Smart895427b2017-02-12 13:52:30 -080010848lpfc_sli4_setup(struct lpfc_hba *phba)
10849{
10850 struct lpfc_sli_ring *pring;
10851
10852 pring = phba->sli4_hba.els_wq->pring;
10853 pring->num_mask = LPFC_MAX_RING_MASK;
10854 pring->prt[0].profile = 0; /* Mask 0 */
10855 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10856 pring->prt[0].type = FC_TYPE_ELS;
10857 pring->prt[0].lpfc_sli_rcv_unsol_event =
10858 lpfc_els_unsol_event;
10859 pring->prt[1].profile = 0; /* Mask 1 */
10860 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10861 pring->prt[1].type = FC_TYPE_ELS;
10862 pring->prt[1].lpfc_sli_rcv_unsol_event =
10863 lpfc_els_unsol_event;
10864 pring->prt[2].profile = 0; /* Mask 2 */
10865 /* NameServer Inquiry */
10866 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10867 /* NameServer */
10868 pring->prt[2].type = FC_TYPE_CT;
10869 pring->prt[2].lpfc_sli_rcv_unsol_event =
10870 lpfc_ct_unsol_event;
10871 pring->prt[3].profile = 0; /* Mask 3 */
10872 /* NameServer response */
10873 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10874 /* NameServer */
10875 pring->prt[3].type = FC_TYPE_CT;
10876 pring->prt[3].lpfc_sli_rcv_unsol_event =
10877 lpfc_ct_unsol_event;
10878 return 0;
10879}
10880
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while driver attach to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0. SLI3 only.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	/* iotag lookup table is allocated lazily on first iocb. */
	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			/* The FCP ring starts with the XTRA entries included;
			 * lpfc_extra_ring_setup() moves them to the extra
			 * ring when multi-ring support is enabled.
			 */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		/* Accumulate total SLIM footprint to validate below. */
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
11004
/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and has no return value.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		/* One FCP ring per hardware queue. */
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	/* Single ELS ring. */
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	pring->txcmplq_cnt = 0;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* NVME LS ring exists only when NVMe FC4 support is on. */
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}
11060
11061/**
11062 * lpfc_sli_queue_init - Queue initialization function
11063 * @phba: Pointer to HBA context object.
11064 *
11065 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11066 * ring. This function also initializes ring indices of each ring.
11067 * This function is called during the initialization of the SLI
11068 * interface of an HBA.
11069 * This function is called with no lock held and always returns
11070 * 1.
11071 **/
11072void
11073lpfc_sli_queue_init(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -050011074{
11075 struct lpfc_sli *psli;
11076 struct lpfc_sli_ring *pring;
James Bottomley604a3e32005-10-29 10:28:33 -050011077 int i;
dea31012005-04-17 16:05:31 -050011078
11079 psli = &phba->sli;
James Smart2e0fef82007-06-17 19:56:36 -050011080 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011081 INIT_LIST_HEAD(&psli->mboxq);
James Smart92d7f7b2007-06-17 19:56:38 -050011082 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea31012005-04-17 16:05:31 -050011083 /* Initialize list headers for txq and txcmplq as double linked lists */
11084 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -080011085 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -050011086 pring->ringno = i;
James Smart7e56aa22012-08-03 12:35:34 -040011087 pring->sli.sli3.next_cmdidx = 0;
11088 pring->sli.sli3.local_getidx = 0;
11089 pring->sli.sli3.cmdidx = 0;
dea31012005-04-17 16:05:31 -050011090 INIT_LIST_HEAD(&pring->iocb_continueq);
James Smart9c2face2008-01-11 01:53:18 -050011091 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea31012005-04-17 16:05:31 -050011092 INIT_LIST_HEAD(&pring->postbufq);
James Smart895427b2017-02-12 13:52:30 -080011093 pring->flag = 0;
11094 INIT_LIST_HEAD(&pring->txq);
11095 INIT_LIST_HEAD(&pring->txcmplq);
James Smart7e56aa22012-08-03 12:35:34 -040011096 spin_lock_init(&pring->ring_lock);
dea31012005-04-17 16:05:31 -050011097 }
James Smart2e0fef82007-06-17 19:56:36 -050011098 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011099}
11100
James Smarte59058c2008-08-24 21:49:00 -040011101/**
James Smart04c68492009-05-22 14:52:52 -040011102 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11103 * @phba: Pointer to HBA context object.
11104 *
11105 * This routine flushes the mailbox command subsystem. It will unconditionally
11106 * flush all the mailbox commands in the three possible stages in the mailbox
11107 * command sub-system: pending mailbox command queue; the outstanding mailbox
11108 * command; and completed mailbox command queue. It is caller's responsibility
11109 * to make sure that the driver is in the proper state to flush the mailbox
11110 * command sub-system. Namely, the posting of mailbox commands into the
11111 * pending mailbox command queue from the various clients must be stopped;
11112 * either the HBA is in a state that it will never works on the outstanding
11113 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11114 * mailbox command has been completed.
11115 **/
11116static void
11117lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11118{
11119 LIST_HEAD(completions);
11120 struct lpfc_sli *psli = &phba->sli;
11121 LPFC_MBOXQ_t *pmb;
11122 unsigned long iflag;
11123
James Smart523128e2018-09-10 10:30:46 -070011124 /* Disable softirqs, including timers from obtaining phba->hbalock */
11125 local_bh_disable();
11126
James Smart04c68492009-05-22 14:52:52 -040011127 /* Flush all the mailbox commands in the mbox system */
11128 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart523128e2018-09-10 10:30:46 -070011129
James Smart04c68492009-05-22 14:52:52 -040011130 /* The pending mailbox command queue */
11131 list_splice_init(&phba->sli.mboxq, &completions);
11132 /* The outstanding active mailbox command */
11133 if (psli->mbox_active) {
11134 list_add_tail(&psli->mbox_active->list, &completions);
11135 psli->mbox_active = NULL;
11136 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11137 }
11138 /* The completed mailbox command queue */
11139 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11140 spin_unlock_irqrestore(&phba->hbalock, iflag);
11141
James Smart523128e2018-09-10 10:30:46 -070011142 /* Enable softirqs again, done with phba->hbalock */
11143 local_bh_enable();
11144
James Smart04c68492009-05-22 14:52:52 -040011145 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11146 while (!list_empty(&completions)) {
11147 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11148 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11149 if (pmb->mbox_cmpl)
11150 pmb->mbox_cmpl(phba, pmb);
11151 }
11152}
11153
11154/**
James Smart3621a712009-04-06 18:47:14 -040011155 * lpfc_sli_host_down - Vport cleanup function
James Smarte59058c2008-08-24 21:49:00 -040011156 * @vport: Pointer to virtual port object.
11157 *
11158 * lpfc_sli_host_down is called to clean up the resources
11159 * associated with a vport before destroying virtual
11160 * port data structures.
11161 * This function does following operations:
11162 * - Free discovery resources associated with this virtual
11163 * port.
11164 * - Free iocbs associated with this virtual port in
11165 * the txq.
11166 * - Send abort for all iocb commands associated with this
11167 * vport in txcmplq.
11168 *
11169 * This function is called with no lock held and always returns 1.
11170 **/
dea31012005-04-17 16:05:31 -050011171int
James Smart92d7f7b2007-06-17 19:56:38 -050011172lpfc_sli_host_down(struct lpfc_vport *vport)
11173{
James Smart858c9f62007-06-17 19:56:39 -050011174 LIST_HEAD(completions);
James Smart92d7f7b2007-06-17 19:56:38 -050011175 struct lpfc_hba *phba = vport->phba;
11176 struct lpfc_sli *psli = &phba->sli;
James Smart895427b2017-02-12 13:52:30 -080011177 struct lpfc_queue *qp = NULL;
James Smart92d7f7b2007-06-17 19:56:38 -050011178 struct lpfc_sli_ring *pring;
11179 struct lpfc_iocbq *iocb, *next_iocb;
James Smart92d7f7b2007-06-17 19:56:38 -050011180 int i;
11181 unsigned long flags = 0;
11182 uint16_t prev_pring_flag;
11183
11184 lpfc_cleanup_discovery_resources(vport);
11185
11186 spin_lock_irqsave(&phba->hbalock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -050011187
James Smart895427b2017-02-12 13:52:30 -080011188 /*
11189 * Error everything on the txq since these iocbs
11190 * have not been given to the FW yet.
11191 * Also issue ABTS for everything on the txcmplq
11192 */
11193 if (phba->sli_rev != LPFC_SLI_REV4) {
11194 for (i = 0; i < psli->num_rings; i++) {
11195 pring = &psli->sli3_ring[i];
11196 prev_pring_flag = pring->flag;
11197 /* Only slow rings */
11198 if (pring->ringno == LPFC_ELS_RING) {
11199 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11200 /* Set the lpfc data pending flag */
11201 set_bit(LPFC_DATA_READY, &phba->data_flags);
11202 }
11203 list_for_each_entry_safe(iocb, next_iocb,
11204 &pring->txq, list) {
11205 if (iocb->vport != vport)
11206 continue;
11207 list_move_tail(&iocb->list, &completions);
11208 }
11209 list_for_each_entry_safe(iocb, next_iocb,
11210 &pring->txcmplq, list) {
11211 if (iocb->vport != vport)
11212 continue;
James Smartdb7531d2020-11-15 11:26:44 -080011213 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11214 NULL);
James Smart895427b2017-02-12 13:52:30 -080011215 }
11216 pring->flag = prev_pring_flag;
James Smart92d7f7b2007-06-17 19:56:38 -050011217 }
James Smart895427b2017-02-12 13:52:30 -080011218 } else {
11219 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11220 pring = qp->pring;
11221 if (!pring)
11222 continue;
11223 if (pring == phba->sli4_hba.els_wq->pring) {
11224 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11225 /* Set the lpfc data pending flag */
11226 set_bit(LPFC_DATA_READY, &phba->data_flags);
11227 }
11228 prev_pring_flag = pring->flag;
James Smart65a3df62019-09-21 20:58:48 -070011229 spin_lock(&pring->ring_lock);
James Smart895427b2017-02-12 13:52:30 -080011230 list_for_each_entry_safe(iocb, next_iocb,
11231 &pring->txq, list) {
11232 if (iocb->vport != vport)
11233 continue;
11234 list_move_tail(&iocb->list, &completions);
11235 }
James Smart65a3df62019-09-21 20:58:48 -070011236 spin_unlock(&pring->ring_lock);
James Smart895427b2017-02-12 13:52:30 -080011237 list_for_each_entry_safe(iocb, next_iocb,
11238 &pring->txcmplq, list) {
11239 if (iocb->vport != vport)
11240 continue;
James Smartdb7531d2020-11-15 11:26:44 -080011241 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11242 NULL);
James Smart895427b2017-02-12 13:52:30 -080011243 }
11244 pring->flag = prev_pring_flag;
11245 }
James Smart92d7f7b2007-06-17 19:56:38 -050011246 }
James Smart92d7f7b2007-06-17 19:56:38 -050011247 spin_unlock_irqrestore(&phba->hbalock, flags);
11248
James Smarta22d73b2021-01-04 10:02:38 -080011249 /* Make sure HBA is alive */
11250 lpfc_issue_hb_tmo(phba);
11251
James Smarta257bf92009-04-06 18:48:10 -040011252 /* Cancel all the IOCBs from the completions list */
11253 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11254 IOERR_SLI_DOWN);
James Smart92d7f7b2007-06-17 19:56:38 -050011255 return 1;
11256}
11257
James Smarte59058c2008-08-24 21:49:00 -040011258/**
James Smart3621a712009-04-06 18:47:14 -040011259 * lpfc_sli_hba_down - Resource cleanup function for the HBA
James Smarte59058c2008-08-24 21:49:00 -040011260 * @phba: Pointer to HBA context object.
11261 *
11262 * This function cleans up all iocb, buffers, mailbox commands
11263 * while shutting down the HBA. This function is called with no
11264 * lock held and always returns 1.
11265 * This function does the following to cleanup driver resources:
11266 * - Free discovery resources for each virtual port
11267 * - Cleanup any pending fabric iocbs
11268 * - Iterate through the iocb txq and free each entry
11269 * in the list.
11270 * - Free up any buffer posted to the HBA
11271 * - Free mailbox commands in the mailbox queue.
11272 **/
James Smart92d7f7b2007-06-17 19:56:38 -050011273int
James Smart2e0fef82007-06-17 19:56:36 -050011274lpfc_sli_hba_down(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -050011275{
James Smart2534ba72007-04-25 09:52:20 -040011276 LIST_HEAD(completions);
James Smart2e0fef82007-06-17 19:56:36 -050011277 struct lpfc_sli *psli = &phba->sli;
James Smart895427b2017-02-12 13:52:30 -080011278 struct lpfc_queue *qp = NULL;
dea31012005-04-17 16:05:31 -050011279 struct lpfc_sli_ring *pring;
James Smart0ff10d42008-01-11 01:52:36 -050011280 struct lpfc_dmabuf *buf_ptr;
dea31012005-04-17 16:05:31 -050011281 unsigned long flags = 0;
James Smart04c68492009-05-22 14:52:52 -040011282 int i;
11283
11284 /* Shutdown the mailbox command sub-system */
James Smart618a5232012-06-12 13:54:36 -040011285 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea31012005-04-17 16:05:31 -050011286
dea31012005-04-17 16:05:31 -050011287 lpfc_hba_down_prep(phba);
11288
James Smart523128e2018-09-10 10:30:46 -070011289 /* Disable softirqs, including timers from obtaining phba->hbalock */
11290 local_bh_disable();
11291
James Smart92d7f7b2007-06-17 19:56:38 -050011292 lpfc_fabric_abort_hba(phba);
11293
James Smart2e0fef82007-06-17 19:56:36 -050011294 spin_lock_irqsave(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -050011295
James Smart895427b2017-02-12 13:52:30 -080011296 /*
11297 * Error everything on the txq since these iocbs
11298 * have not been given to the FW yet.
11299 */
11300 if (phba->sli_rev != LPFC_SLI_REV4) {
11301 for (i = 0; i < psli->num_rings; i++) {
11302 pring = &psli->sli3_ring[i];
11303 /* Only slow rings */
11304 if (pring->ringno == LPFC_ELS_RING) {
11305 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11306 /* Set the lpfc data pending flag */
11307 set_bit(LPFC_DATA_READY, &phba->data_flags);
11308 }
11309 list_splice_init(&pring->txq, &completions);
11310 }
11311 } else {
11312 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11313 pring = qp->pring;
11314 if (!pring)
11315 continue;
James Smart4b0a42b2019-08-14 16:56:42 -070011316 spin_lock(&pring->ring_lock);
James Smart895427b2017-02-12 13:52:30 -080011317 list_splice_init(&pring->txq, &completions);
James Smart4b0a42b2019-08-14 16:56:42 -070011318 spin_unlock(&pring->ring_lock);
James Smart895427b2017-02-12 13:52:30 -080011319 if (pring == phba->sli4_hba.els_wq->pring) {
11320 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11321 /* Set the lpfc data pending flag */
11322 set_bit(LPFC_DATA_READY, &phba->data_flags);
11323 }
11324 }
dea31012005-04-17 16:05:31 -050011325 }
James Smart2e0fef82007-06-17 19:56:36 -050011326 spin_unlock_irqrestore(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -050011327
James Smarta257bf92009-04-06 18:48:10 -040011328 /* Cancel all the IOCBs from the completions list */
11329 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11330 IOERR_SLI_DOWN);
James Smart2534ba72007-04-25 09:52:20 -040011331
James Smart0ff10d42008-01-11 01:52:36 -050011332 spin_lock_irqsave(&phba->hbalock, flags);
11333 list_splice_init(&phba->elsbuf, &completions);
11334 phba->elsbuf_cnt = 0;
11335 phba->elsbuf_prev_cnt = 0;
11336 spin_unlock_irqrestore(&phba->hbalock, flags);
11337
11338 while (!list_empty(&completions)) {
11339 list_remove_head(&completions, buf_ptr,
11340 struct lpfc_dmabuf, list);
11341 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11342 kfree(buf_ptr);
11343 }
11344
James Smart523128e2018-09-10 10:30:46 -070011345 /* Enable softirqs again, done with phba->hbalock */
11346 local_bh_enable();
11347
dea31012005-04-17 16:05:31 -050011348 /* Return any active mbox cmds */
11349 del_timer_sync(&psli->mbox_tmo);
James Smart92d7f7b2007-06-17 19:56:38 -050011350
James Smartda0436e2009-05-22 14:51:39 -040011351 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -050011352 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
James Smartda0436e2009-05-22 14:51:39 -040011353 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -050011354
James Smartda0436e2009-05-22 14:51:39 -040011355 return 1;
11356}
James Smart92d7f7b2007-06-17 19:56:38 -050011357
James Smartda0436e2009-05-22 14:51:39 -040011358/**
James Smart3621a712009-04-06 18:47:14 -040011359 * lpfc_sli_pcimem_bcopy - SLI memory copy function
James Smarte59058c2008-08-24 21:49:00 -040011360 * @srcp: Source memory pointer.
11361 * @destp: Destination memory pointer.
11362 * @cnt: Number of words required to be copied.
11363 *
11364 * This function is used for copying data between driver memory
11365 * and the SLI memory. This function also changes the endianness
11366 * of each word if native endianness is different from SLI
11367 * endianness. This function can be called with or without
11368 * lock.
11369 **/
dea31012005-04-17 16:05:31 -050011370void
11371lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11372{
11373 uint32_t *src = srcp;
11374 uint32_t *dest = destp;
11375 uint32_t ldata;
11376 int i;
11377
11378 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11379 ldata = *src;
11380 ldata = le32_to_cpu(ldata);
11381 *dest = ldata;
11382 src++;
11383 dest++;
11384 }
11385}
11386
James Smarte59058c2008-08-24 21:49:00 -040011387
11388/**
James Smarta0c87cb2009-07-19 10:01:10 -040011389 * lpfc_sli_bemem_bcopy - SLI memory copy function
11390 * @srcp: Source memory pointer.
11391 * @destp: Destination memory pointer.
11392 * @cnt: Number of words required to be copied.
11393 *
11394 * This function is used for copying data between a data structure
11395 * with big endian representation to local endianness.
11396 * This function can be called with or without lock.
11397 **/
11398void
11399lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11400{
11401 uint32_t *src = srcp;
11402 uint32_t *dest = destp;
11403 uint32_t ldata;
11404 int i;
11405
11406 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11407 ldata = *src;
11408 ldata = be32_to_cpu(ldata);
11409 *dest = ldata;
11410 src++;
11411 dest++;
11412 }
11413}
11414
11415/**
James Smart3621a712009-04-06 18:47:14 -040011416 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
James Smarte59058c2008-08-24 21:49:00 -040011417 * @phba: Pointer to HBA context object.
11418 * @pring: Pointer to driver SLI ring object.
11419 * @mp: Pointer to driver buffer object.
11420 *
11421 * This function is called with no lock held.
11422 * It always return zero after adding the buffer to the postbufq
11423 * buffer list.
11424 **/
dea31012005-04-17 16:05:31 -050011425int
James Smart2e0fef82007-06-17 19:56:36 -050011426lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11427 struct lpfc_dmabuf *mp)
dea31012005-04-17 16:05:31 -050011428{
11429 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11430 later */
James Smart2e0fef82007-06-17 19:56:36 -050011431 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011432 list_add_tail(&mp->list, &pring->postbufq);
dea31012005-04-17 16:05:31 -050011433 pring->postbufq_cnt++;
James Smart2e0fef82007-06-17 19:56:36 -050011434 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011435 return 0;
11436}
11437
James Smarte59058c2008-08-24 21:49:00 -040011438/**
James Smart3621a712009-04-06 18:47:14 -040011439 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
James Smarte59058c2008-08-24 21:49:00 -040011440 * @phba: Pointer to HBA context object.
11441 *
11442 * When HBQ is enabled, buffers are searched based on tags. This function
11443 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
11444 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
11445 * does not conflict with tags of buffer posted for unsolicited events.
11446 * The function returns the allocated tag. The function is called with
11447 * no locks held.
11448 **/
James Smart76bb24e2007-10-27 13:38:00 -040011449uint32_t
11450lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11451{
11452 spin_lock_irq(&phba->hbalock);
11453 phba->buffer_tag_count++;
11454 /*
11455 * Always set the QUE_BUFTAG_BIT to distiguish between
11456 * a tag assigned by HBQ.
11457 */
11458 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11459 spin_unlock_irq(&phba->hbalock);
11460 return phba->buffer_tag_count;
11461}
11462
James Smarte59058c2008-08-24 21:49:00 -040011463/**
James Smart3621a712009-04-06 18:47:14 -040011464 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
James Smarte59058c2008-08-24 21:49:00 -040011465 * @phba: Pointer to HBA context object.
11466 * @pring: Pointer to driver SLI ring object.
11467 * @tag: Buffer tag.
11468 *
11469 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11470 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11471 * iocb is posted to the response ring with the tag of the buffer.
11472 * This function searches the pring->postbufq list using the tag
11473 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
11474 * iocb. If the buffer is found then lpfc_dmabuf object of the
11475 * buffer is returned to the caller else NULL is returned.
11476 * This function is called with no lock held.
11477 **/
James Smart76bb24e2007-10-27 13:38:00 -040011478struct lpfc_dmabuf *
11479lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11480 uint32_t tag)
11481{
11482 struct lpfc_dmabuf *mp, *next_mp;
11483 struct list_head *slp = &pring->postbufq;
11484
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011485 /* Search postbufq, from the beginning, looking for a match on tag */
James Smart76bb24e2007-10-27 13:38:00 -040011486 spin_lock_irq(&phba->hbalock);
11487 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11488 if (mp->buffer_tag == tag) {
11489 list_del_init(&mp->list);
11490 pring->postbufq_cnt--;
11491 spin_unlock_irq(&phba->hbalock);
11492 return mp;
11493 }
11494 }
11495
11496 spin_unlock_irq(&phba->hbalock);
Dick Kennedy372c1872020-06-30 14:50:00 -070011497 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartd7c255b2008-08-24 21:50:00 -040011498 "0402 Cannot find virtual addr for buffer tag on "
James Smart32350662019-08-14 16:57:06 -070011499 "ring %d Data x%lx x%px x%px x%x\n",
James Smart76bb24e2007-10-27 13:38:00 -040011500 pring->ringno, (unsigned long) tag,
11501 slp->next, slp->prev, pring->postbufq_cnt);
11502
11503 return NULL;
11504}
dea31012005-04-17 16:05:31 -050011505
James Smarte59058c2008-08-24 21:49:00 -040011506/**
James Smart3621a712009-04-06 18:47:14 -040011507 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
James Smarte59058c2008-08-24 21:49:00 -040011508 * @phba: Pointer to HBA context object.
11509 * @pring: Pointer to driver SLI ring object.
11510 * @phys: DMA address of the buffer.
11511 *
11512 * This function searches the buffer list using the dma_address
11513 * of unsolicited event to find the driver's lpfc_dmabuf object
11514 * corresponding to the dma_address. The function returns the
11515 * lpfc_dmabuf object if a buffer is found else it returns NULL.
11516 * This function is called by the ct and els unsolicited event
11517 * handlers to get the buffer associated with the unsolicited
11518 * event.
11519 *
11520 * This function is called with no lock held.
11521 **/
dea31012005-04-17 16:05:31 -050011522struct lpfc_dmabuf *
11523lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11524 dma_addr_t phys)
11525{
11526 struct lpfc_dmabuf *mp, *next_mp;
11527 struct list_head *slp = &pring->postbufq;
11528
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011529 /* Search postbufq, from the beginning, looking for a match on phys */
James Smart2e0fef82007-06-17 19:56:36 -050011530 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011531 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11532 if (mp->phys == phys) {
11533 list_del_init(&mp->list);
11534 pring->postbufq_cnt--;
James Smart2e0fef82007-06-17 19:56:36 -050011535 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050011536 return mp;
11537 }
11538 }
11539
James Smart2e0fef82007-06-17 19:56:36 -050011540 spin_unlock_irq(&phba->hbalock);
Dick Kennedy372c1872020-06-30 14:50:00 -070011541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -040011542 "0410 Cannot find virtual addr for mapped buf on "
James Smart32350662019-08-14 16:57:06 -070011543 "ring %d Data x%llx x%px x%px x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011544 pring->ringno, (unsigned long long)phys,
dea31012005-04-17 16:05:31 -050011545 slp->next, slp->prev, pring->postbufq_cnt);
11546 return NULL;
11547}
11548
James Smarte59058c2008-08-24 21:49:00 -040011549/**
James Smart3621a712009-04-06 18:47:14 -040011550 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
James Smarte59058c2008-08-24 21:49:00 -040011551 * @phba: Pointer to HBA context object.
11552 * @cmdiocb: Pointer to driver command iocb object.
11553 * @rspiocb: Pointer to driver response iocb object.
11554 *
11555 * This function is the completion handler for the abort iocbs for
11556 * ELS commands. This function is called from the ELS ring event
11557 * handler with no lock held. This function frees memory resources
11558 * associated with the abort iocb.
11559 **/
dea31012005-04-17 16:05:31 -050011560static void
James Smart2e0fef82007-06-17 19:56:36 -050011561lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11562 struct lpfc_iocbq *rspiocb)
dea31012005-04-17 16:05:31 -050011563{
James Smart2e0fef82007-06-17 19:56:36 -050011564 IOCB_t *irsp = &rspiocb->iocb;
James Smart2680eea2007-04-25 09:52:55 -040011565 uint16_t abort_iotag, abort_context;
James Smartff78d8f2011-12-13 13:21:35 -050011566 struct lpfc_iocbq *abort_iocb = NULL;
James Smart2680eea2007-04-25 09:52:55 -040011567
11568 if (irsp->ulpStatus) {
James Smartff78d8f2011-12-13 13:21:35 -050011569
11570 /*
11571 * Assume that the port already completed and returned, or
11572 * will return the iocb. Just Log the message.
11573 */
James Smart2680eea2007-04-25 09:52:55 -040011574 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11575 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11576
James Smart2e0fef82007-06-17 19:56:36 -050011577 spin_lock_irq(&phba->hbalock);
James Smart45ed1192009-10-02 15:17:02 -040011578 if (phba->sli_rev < LPFC_SLI_REV4) {
James Smartfaa832e2018-07-31 17:23:18 -070011579 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11580 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11581 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11582 spin_unlock_irq(&phba->hbalock);
11583 goto release_iocb;
11584 }
James Smart45ed1192009-10-02 15:17:02 -040011585 if (abort_iotag != 0 &&
11586 abort_iotag <= phba->sli.last_iotag)
11587 abort_iocb =
11588 phba->sli.iocbq_lookup[abort_iotag];
11589 } else
11590 /* For sli4 the abort_tag is the XRI,
11591 * so the abort routine puts the iotag of the iocb
11592 * being aborted in the context field of the abort
11593 * IOCB.
11594 */
11595 abort_iocb = phba->sli.iocbq_lookup[abort_context];
James Smart2680eea2007-04-25 09:52:55 -040011596
James Smart2a9bf3d2010-06-07 15:24:45 -040011597 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
James Smart32350662019-08-14 16:57:06 -070011598 "0327 Cannot abort els iocb x%px "
James Smart2a9bf3d2010-06-07 15:24:45 -040011599 "with tag %x context %x, abort status %x, "
11600 "abort code %x\n",
11601 abort_iocb, abort_iotag, abort_context,
11602 irsp->ulpStatus, irsp->un.ulpWord[4]);
James Smart2680eea2007-04-25 09:52:55 -040011603
James Smartff78d8f2011-12-13 13:21:35 -050011604 spin_unlock_irq(&phba->hbalock);
James Smart2680eea2007-04-25 09:52:55 -040011605 }
James Smartfaa832e2018-07-31 17:23:18 -070011606release_iocb:
James Bottomley604a3e32005-10-29 10:28:33 -050011607 lpfc_sli_release_iocbq(phba, cmdiocb);
dea31012005-04-17 16:05:31 -050011608 return;
11609}
11610
James Smarte59058c2008-08-24 21:49:00 -040011611/**
James Smart3621a712009-04-06 18:47:14 -040011612 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
James Smarte59058c2008-08-24 21:49:00 -040011613 * @phba: Pointer to HBA context object.
11614 * @cmdiocb: Pointer to driver command iocb object.
11615 * @rspiocb: Pointer to driver response iocb object.
11616 *
11617 * The function is called from SLI ring event handler with no
11618 * lock held. This function is the completion handler for ELS commands
11619 * which are aborted. The function frees memory resources used for
11620 * the aborted ELS commands.
11621 **/
James Smart9dd83f72021-03-01 09:18:11 -080011622void
James Smart92d7f7b2007-06-17 19:56:38 -050011623lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11624 struct lpfc_iocbq *rspiocb)
11625{
11626 IOCB_t *irsp = &rspiocb->iocb;
11627
11628 /* ELS cmd tag <ulpIoTag> completes */
11629 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smartd7c255b2008-08-24 21:50:00 -040011630 "0139 Ignoring ELS cmd tag x%x completion Data: "
James Smart92d7f7b2007-06-17 19:56:38 -050011631 "x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011632 irsp->ulpIoTag, irsp->ulpStatus,
James Smart92d7f7b2007-06-17 19:56:38 -050011633 irsp->un.ulpWord[4], irsp->ulpTimeout);
James Smart4430f7f2020-11-15 11:26:31 -080011634 lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
James Smart858c9f62007-06-17 19:56:39 -050011635 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11636 lpfc_ct_free_iocb(phba, cmdiocb);
11637 else
11638 lpfc_els_free_iocb(phba, cmdiocb);
James Smart92d7f7b2007-06-17 19:56:38 -050011639}
11640
James Smarte59058c2008-08-24 21:49:00 -040011641/**
James Smartdb7531d2020-11-15 11:26:44 -080011642 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
James Smarte59058c2008-08-24 21:49:00 -040011643 * @phba: Pointer to HBA context object.
11644 * @pring: Pointer to driver SLI ring object.
11645 * @cmdiocb: Pointer to driver command iocb object.
James Smartdb7531d2020-11-15 11:26:44 -080011646 * @cmpl: completion function.
James Smarte59058c2008-08-24 21:49:00 -040011647 *
James Smartdb7531d2020-11-15 11:26:44 -080011648 * This function issues an abort iocb for the provided command iocb. In case
11649 * of unloading, the abort iocb will not be issued to commands on the ELS
11650 * ring. Instead, the callback function shall be changed to those commands
11651 * so that nothing happens when them finishes. This function is called with
11652 * hbalock held andno ring_lock held (SLI4). The function returns IOCB_SUCCESS
11653 * when the command iocb is an abort request.
11654 *
James Smarte59058c2008-08-24 21:49:00 -040011655 **/
James Smartdb7531d2020-11-15 11:26:44 -080011656int
11657lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11658 struct lpfc_iocbq *cmdiocb, void *cmpl)
dea31012005-04-17 16:05:31 -050011659{
James Smart2e0fef82007-06-17 19:56:36 -050011660 struct lpfc_vport *vport = cmdiocb->vport;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011661 struct lpfc_iocbq *abtsiocbp;
dea31012005-04-17 16:05:31 -050011662 IOCB_t *icmd = NULL;
11663 IOCB_t *iabt = NULL;
James Smartdb7531d2020-11-15 11:26:44 -080011664 int retval = IOCB_ERROR;
James Smart7e56aa22012-08-03 12:35:34 -040011665 unsigned long iflags;
James Smartfaa832e2018-07-31 17:23:18 -070011666 struct lpfc_nodelist *ndlp;
James Smart07951072007-04-25 09:51:38 -040011667
James Smart92d7f7b2007-06-17 19:56:38 -050011668 /*
11669 * There are certain command types we don't want to abort. And we
11670 * don't want to abort commands that are already in the process of
11671 * being aborted.
James Smart07951072007-04-25 09:51:38 -040011672 */
11673 icmd = &cmdiocb->iocb;
James Smart2e0fef82007-06-17 19:56:36 -050011674 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
James Smart92d7f7b2007-06-17 19:56:38 -050011675 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
James Smart078c68b2021-04-11 18:31:12 -070011676 cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
James Smartdb7531d2020-11-15 11:26:44 -080011677 return IOCB_ABORTING;
11678
11679 if (!pring) {
11680 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11681 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11682 else
11683 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11684 return retval;
11685 }
11686
11687 /*
11688 * If we're unloading, don't abort iocb on the ELS ring, but change
11689 * the callback so that nothing happens when it finishes.
11690 */
11691 if ((vport->load_flag & FC_UNLOADING) &&
11692 pring->ringno == LPFC_ELS_RING) {
11693 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11694 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11695 else
11696 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11697 return retval;
11698 }
James Smart07951072007-04-25 09:51:38 -040011699
dea31012005-04-17 16:05:31 -050011700 /* issue ABTS for this IOCB based on iotag */
James Smart92d7f7b2007-06-17 19:56:38 -050011701 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -050011702 if (abtsiocbp == NULL)
James Smartdb7531d2020-11-15 11:26:44 -080011703 return IOCB_NORESOURCE;
dea31012005-04-17 16:05:31 -050011704
James Smart07951072007-04-25 09:51:38 -040011705 /* This signals the response to set the correct status
James Smart341af102010-01-26 23:07:37 -050011706 * before calling the completion handler
James Smart07951072007-04-25 09:51:38 -040011707 */
11708 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11709
dea31012005-04-17 16:05:31 -050011710 iabt = &abtsiocbp->iocb;
James Smart07951072007-04-25 09:51:38 -040011711 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11712 iabt->un.acxri.abortContextTag = icmd->ulpContext;
James Smart45ed1192009-10-02 15:17:02 -040011713 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -040011714 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
James Smartdb7531d2020-11-15 11:26:44 -080011715 if (pring->ringno == LPFC_ELS_RING)
11716 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
James Smartfaa832e2018-07-31 17:23:18 -070011717 } else {
James Smartda0436e2009-05-22 14:51:39 -040011718 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
James Smartfaa832e2018-07-31 17:23:18 -070011719 if (pring->ringno == LPFC_ELS_RING) {
11720 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11721 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11722 }
11723 }
dea31012005-04-17 16:05:31 -050011724 iabt->ulpLe = 1;
James Smart07951072007-04-25 09:51:38 -040011725 iabt->ulpClass = icmd->ulpClass;
dea31012005-04-17 16:05:31 -050011726
James Smart5ffc2662009-11-18 15:39:44 -050011727 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080011728 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
James Smartdb7531d2020-11-15 11:26:44 -080011729 if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
11730 abtsiocbp->iocb_flag |= LPFC_IO_FCP;
James Smart341af102010-01-26 23:07:37 -050011731 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smartdb7531d2020-11-15 11:26:44 -080011732 }
James Smart9bd2bff52014-09-03 12:57:30 -040011733 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11734 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
James Smart5ffc2662009-11-18 15:39:44 -050011735
James Smart2e0fef82007-06-17 19:56:36 -050011736 if (phba->link_state >= LPFC_LINK_UP)
James Smart07951072007-04-25 09:51:38 -040011737 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11738 else
11739 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11740
James Smartdb7531d2020-11-15 11:26:44 -080011741 if (cmpl)
11742 abtsiocbp->iocb_cmpl = cmpl;
11743 else
11744 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
James Smarte6c6acc2016-12-19 15:07:23 -080011745 abtsiocbp->vport = vport;
James Smart5b8bd0c2007-04-25 09:52:49 -040011746
James Smart7e56aa22012-08-03 12:35:34 -040011747 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -080011748 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11749 if (unlikely(pring == NULL))
James Smartdb7531d2020-11-15 11:26:44 -080011750 goto abort_iotag_exit;
James Smart7e56aa22012-08-03 12:35:34 -040011751 /* Note: both hbalock and ring_lock need to be set here */
11752 spin_lock_irqsave(&pring->ring_lock, iflags);
11753 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11754 abtsiocbp, 0);
11755 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11756 } else {
11757 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11758 abtsiocbp, 0);
11759 }
James Smart07951072007-04-25 09:51:38 -040011760
11761abort_iotag_exit:
James Smartdb7531d2020-11-15 11:26:44 -080011762
11763 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11764 "0339 Abort xri x%x, original iotag x%x, "
11765 "abort cmd iotag x%x retval x%x\n",
11766 iabt->un.acxri.abortIoTag,
11767 iabt->un.acxri.abortContextTag,
11768 abtsiocbp->iotag, retval);
11769
11770 if (retval) {
11771 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11772 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11773 }
11774
James Smart2e0fef82007-06-17 19:56:36 -050011775 /*
11776 * Caller to this routine should check for IOCB_ERROR
11777 * and handle it properly. This routine no longer removes
11778 * iocb off txcmplq and call compl in case of IOCB_ERROR.
James Smart07951072007-04-25 09:51:38 -040011779 */
James Smart2e0fef82007-06-17 19:56:36 -050011780 return retval;
dea31012005-04-17 16:05:31 -050011781}
11782
James Smarte59058c2008-08-24 21:49:00 -040011783/**
James Smart5af5eee2010-10-22 11:06:38 -040011784 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11785 * @phba: pointer to lpfc HBA data structure.
11786 *
11787 * This routine will abort all pending and outstanding iocbs to an HBA.
11788 **/
11789void
11790lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11791{
11792 struct lpfc_sli *psli = &phba->sli;
11793 struct lpfc_sli_ring *pring;
James Smart895427b2017-02-12 13:52:30 -080011794 struct lpfc_queue *qp = NULL;
James Smart5af5eee2010-10-22 11:06:38 -040011795 int i;
11796
James Smart895427b2017-02-12 13:52:30 -080011797 if (phba->sli_rev != LPFC_SLI_REV4) {
11798 for (i = 0; i < psli->num_rings; i++) {
11799 pring = &psli->sli3_ring[i];
11800 lpfc_sli_abort_iocb_ring(phba, pring);
11801 }
11802 return;
11803 }
11804 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11805 pring = qp->pring;
11806 if (!pring)
11807 continue;
James Smartdb55fba2014-04-04 13:52:02 -040011808 lpfc_sli_abort_iocb_ring(phba, pring);
James Smart5af5eee2010-10-22 11:06:38 -040011809 }
11810}
11811
11812/**
James Smart3621a712009-04-06 18:47:14 -040011813 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
James Smarte59058c2008-08-24 21:49:00 -040011814 * @iocbq: Pointer to driver iocb object.
11815 * @vport: Pointer to driver virtual port object.
11816 * @tgt_id: SCSI ID of the target.
11817 * @lun_id: LUN ID of the scsi device.
11818 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11819 *
James Smart3621a712009-04-06 18:47:14 -040011820 * This function acts as an iocb filter for functions which abort or count
James Smarte59058c2008-08-24 21:49:00 -040011821 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11822 * 0 if the filtering criteria is met for the given iocb and will return
11823 * 1 if the filtering criteria is not met.
11824 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11825 * given iocb is for the SCSI device specified by vport, tgt_id and
11826 * lun_id parameter.
11827 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11828 * given iocb is for the SCSI target specified by vport and tgt_id
11829 * parameters.
11830 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11831 * given iocb is for the SCSI host associated with the given vport.
11832 * This function is called with no locks held.
11833 **/
dea31012005-04-17 16:05:31 -050011834static int
James Smart51ef4c22007-08-02 11:10:31 -040011835lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11836 uint16_t tgt_id, uint64_t lun_id,
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011837 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050011838{
James Smartc4908502019-01-28 11:14:28 -080011839 struct lpfc_io_buf *lpfc_cmd;
James Smarte1364712021-04-21 16:44:33 -070011840 IOCB_t *icmd = NULL;
dea31012005-04-17 16:05:31 -050011841 int rc = 1;
11842
James Smart9ec58ec2021-01-04 10:02:35 -080011843 if (!iocbq || iocbq->vport != vport)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011844 return rc;
11845
James Smarte1364712021-04-21 16:44:33 -070011846 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11847 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
11848 iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11849 return rc;
11850
11851 icmd = &iocbq->iocb;
11852 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11853 icmd->ulpCommand == CMD_CLOSE_XRI_CN)
James Smart51ef4c22007-08-02 11:10:31 -040011854 return rc;
11855
James Smartc4908502019-01-28 11:14:28 -080011856 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011857
James Smart495a7142008-06-14 22:52:59 -040011858 if (lpfc_cmd->pCmd == NULL)
dea31012005-04-17 16:05:31 -050011859 return rc;
11860
11861 switch (ctx_cmd) {
11862 case LPFC_CTX_LUN:
James Smartb0e83012018-06-26 08:24:29 -070011863 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
James Smart495a7142008-06-14 22:52:59 -040011864 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11865 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea31012005-04-17 16:05:31 -050011866 rc = 0;
11867 break;
11868 case LPFC_CTX_TGT:
James Smartb0e83012018-06-26 08:24:29 -070011869 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
James Smart495a7142008-06-14 22:52:59 -040011870 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea31012005-04-17 16:05:31 -050011871 rc = 0;
11872 break;
dea31012005-04-17 16:05:31 -050011873 case LPFC_CTX_HOST:
11874 rc = 0;
11875 break;
11876 default:
11877 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -070011878 __func__, ctx_cmd);
dea31012005-04-17 16:05:31 -050011879 break;
11880 }
11881
11882 return rc;
11883}
11884
James Smarte59058c2008-08-24 21:49:00 -040011885/**
James Smart3621a712009-04-06 18:47:14 -040011886 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
James Smarte59058c2008-08-24 21:49:00 -040011887 * @vport: Pointer to virtual port.
11888 * @tgt_id: SCSI ID of the target.
11889 * @lun_id: LUN ID of the scsi device.
11890 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11891 *
11892 * This function returns number of FCP commands pending for the vport.
11893 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11894 * commands pending on the vport associated with SCSI device specified
11895 * by tgt_id and lun_id parameters.
11896 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11897 * commands pending on the vport associated with SCSI target specified
11898 * by tgt_id parameter.
11899 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11900 * commands pending on the vport.
11901 * This function returns the number of iocbs which satisfy the filter.
11902 * This function is called without any lock held.
11903 **/
dea31012005-04-17 16:05:31 -050011904int
James Smart51ef4c22007-08-02 11:10:31 -040011905lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11906 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050011907{
James Smart51ef4c22007-08-02 11:10:31 -040011908 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011909 struct lpfc_iocbq *iocbq;
11910 int sum, i;
dea31012005-04-17 16:05:31 -050011911
Johannes Thumshirn31979002016-07-18 16:06:03 +020011912 spin_lock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011913 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11914 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -050011915
James Smart51ef4c22007-08-02 11:10:31 -040011916 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11917 ctx_cmd) == 0)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011918 sum++;
dea31012005-04-17 16:05:31 -050011919 }
Johannes Thumshirn31979002016-07-18 16:06:03 +020011920 spin_unlock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040011921
dea31012005-04-17 16:05:31 -050011922 return sum;
11923}
11924
James Smarte59058c2008-08-24 21:49:00 -040011925/**
James Smartdb7531d2020-11-15 11:26:44 -080011926 * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11927 * @phba: Pointer to HBA context object
11928 * @cmdiocb: Pointer to command iocb object.
11929 * @wcqe: pointer to the complete wcqe
11930 *
11931 * This function is called when an aborted FCP iocb completes. This
11932 * function is called by the ring event handler with no lock held.
11933 * This function frees the iocb. It is called for sli-4 adapters.
11934 **/
11935void
11936lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11937 struct lpfc_wcqe_complete *wcqe)
11938{
11939 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11940 "3017 ABORT_XRI_CN completing on rpi x%x "
11941 "original iotag x%x, abort cmd iotag x%x "
11942 "status 0x%x, reason 0x%x\n",
11943 cmdiocb->iocb.un.acxri.abortContextTag,
11944 cmdiocb->iocb.un.acxri.abortIoTag,
11945 cmdiocb->iotag,
11946 (bf_get(lpfc_wcqe_c_status, wcqe)
11947 & LPFC_IOCB_STATUS_MASK),
11948 wcqe->parameter);
11949 lpfc_sli_release_iocbq(phba, cmdiocb);
11950}
11951
11952/**
James Smart3621a712009-04-06 18:47:14 -040011953 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
James Smarte59058c2008-08-24 21:49:00 -040011954 * @phba: Pointer to HBA context object
11955 * @cmdiocb: Pointer to command iocb object.
11956 * @rspiocb: Pointer to response iocb object.
11957 *
11958 * This function is called when an aborted FCP iocb completes. This
11959 * function is called by the ring event handler with no lock held.
11960 * This function frees the iocb.
11961 **/
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040011962void
James Smart2e0fef82007-06-17 19:56:36 -050011963lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11964 struct lpfc_iocbq *rspiocb)
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040011965{
James Smartcb69f7d2011-12-13 13:21:57 -050011966 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart8e668af2013-05-31 17:04:28 -040011967 "3096 ABORT_XRI_CN completing on rpi x%x "
James Smartcb69f7d2011-12-13 13:21:57 -050011968 "original iotag x%x, abort cmd iotag x%x "
11969 "status 0x%x, reason 0x%x\n",
11970 cmdiocb->iocb.un.acxri.abortContextTag,
11971 cmdiocb->iocb.un.acxri.abortIoTag,
11972 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11973 rspiocb->iocb.un.ulpWord[4]);
James Bottomley604a3e32005-10-29 10:28:33 -050011974 lpfc_sli_release_iocbq(phba, cmdiocb);
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040011975 return;
11976}
11977
James Smarte59058c2008-08-24 21:49:00 -040011978/**
James Smart3621a712009-04-06 18:47:14 -040011979 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
James Smarte59058c2008-08-24 21:49:00 -040011980 * @vport: Pointer to virtual port.
James Smarte59058c2008-08-24 21:49:00 -040011981 * @tgt_id: SCSI ID of the target.
11982 * @lun_id: LUN ID of the scsi device.
11983 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11984 *
11985 * This function sends an abort command for every SCSI command
11986 * associated with the given virtual port pending on the ring
11987 * filtered by lpfc_sli_validate_fcp_iocb function.
11988 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11989 * FCP iocbs associated with lun specified by tgt_id and lun_id
11990 * parameters
11991 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11992 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11993 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11994 * FCP iocbs associated with virtual port.
James Smart078c68b2021-04-11 18:31:12 -070011995 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
11996 * lpfc_sli4_calc_ring is used.
James Smarte59058c2008-08-24 21:49:00 -040011997 * This function returns number of iocbs it failed to abort.
11998 * This function is called with no locks held.
11999 **/
dea31012005-04-17 16:05:31 -050012000int
James Smart078c68b2021-04-11 18:31:12 -070012001lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12002 lpfc_ctx_cmd abort_cmd)
dea31012005-04-17 16:05:31 -050012003{
James Smart51ef4c22007-08-02 11:10:31 -040012004 struct lpfc_hba *phba = vport->phba;
James Smart078c68b2021-04-11 18:31:12 -070012005 struct lpfc_sli_ring *pring = NULL;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040012006 struct lpfc_iocbq *iocbq;
dea31012005-04-17 16:05:31 -050012007 int errcnt = 0, ret_val = 0;
James Smartdb7531d2020-11-15 11:26:44 -080012008 unsigned long iflags;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040012009 int i;
James Smart078c68b2021-04-11 18:31:12 -070012010 void *fcp_cmpl = NULL;
dea31012005-04-17 16:05:31 -050012011
James Smartb0e83012018-06-26 08:24:29 -070012012 /* all I/Os are in process of being flushed */
James Smartc00f62e2019-08-14 16:57:11 -070012013 if (phba->hba_flag & HBA_IOQ_FLUSH)
James Smartb0e83012018-06-26 08:24:29 -070012014 return errcnt;
12015
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040012016 for (i = 1; i <= phba->sli.last_iotag; i++) {
12017 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -050012018
James Smart51ef4c22007-08-02 11:10:31 -040012019 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
James Smart2e0fef82007-06-17 19:56:36 -050012020 abort_cmd) != 0)
dea31012005-04-17 16:05:31 -050012021 continue;
12022
James Smartdb7531d2020-11-15 11:26:44 -080012023 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart078c68b2021-04-11 18:31:12 -070012024 if (phba->sli_rev == LPFC_SLI_REV3) {
12025 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12026 fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
12027 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12028 pring = lpfc_sli4_calc_ring(phba, iocbq);
12029 fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
12030 }
James Smartdb7531d2020-11-15 11:26:44 -080012031 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
James Smart078c68b2021-04-11 18:31:12 -070012032 fcp_cmpl);
James Smartdb7531d2020-11-15 11:26:44 -080012033 spin_unlock_irqrestore(&phba->hbalock, iflags);
12034 if (ret_val != IOCB_SUCCESS)
dea31012005-04-17 16:05:31 -050012035 errcnt++;
dea31012005-04-17 16:05:31 -050012036 }
12037
12038 return errcnt;
12039}
12040
James Smarte59058c2008-08-24 21:49:00 -040012041/**
James Smart98912dda2014-04-04 13:52:31 -040012042 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12043 * @vport: Pointer to virtual port.
12044 * @pring: Pointer to driver SLI ring object.
12045 * @tgt_id: SCSI ID of the target.
12046 * @lun_id: LUN ID of the scsi device.
Lee Jones7af29d42020-07-21 17:41:31 +010012047 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
James Smart98912dda2014-04-04 13:52:31 -040012048 *
12049 * This function sends an abort command for every SCSI command
12050 * associated with the given virtual port pending on the ring
12051 * filtered by lpfc_sli_validate_fcp_iocb function.
12052 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12053 * FCP iocbs associated with lun specified by tgt_id and lun_id
12054 * parameters
12055 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12056 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12057 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12058 * FCP iocbs associated with virtual port.
12059 * This function returns number of iocbs it aborted .
12060 * This function is called with no locks held right after a taskmgmt
12061 * command is sent.
12062 **/
12063int
12064lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12065 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12066{
12067 struct lpfc_hba *phba = vport->phba;
James Smartc4908502019-01-28 11:14:28 -080012068 struct lpfc_io_buf *lpfc_cmd;
James Smart98912dda2014-04-04 13:52:31 -040012069 struct lpfc_iocbq *abtsiocbq;
James Smart8c50d252014-09-03 12:58:16 -040012070 struct lpfc_nodelist *ndlp;
James Smart98912dda2014-04-04 13:52:31 -040012071 struct lpfc_iocbq *iocbq;
12072 IOCB_t *icmd;
12073 int sum, i, ret_val;
12074 unsigned long iflags;
James Smartc2017262019-01-28 11:14:37 -080012075 struct lpfc_sli_ring *pring_s4 = NULL;
James Smart98912dda2014-04-04 13:52:31 -040012076
James Smart59c68ea2018-04-09 14:24:25 -070012077 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart98912dda2014-04-04 13:52:31 -040012078
12079 /* all I/Os are in process of being flushed */
James Smartc00f62e2019-08-14 16:57:11 -070012080 if (phba->hba_flag & HBA_IOQ_FLUSH) {
James Smart59c68ea2018-04-09 14:24:25 -070012081 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart98912dda2014-04-04 13:52:31 -040012082 return 0;
12083 }
12084 sum = 0;
12085
12086 for (i = 1; i <= phba->sli.last_iotag; i++) {
12087 iocbq = phba->sli.iocbq_lookup[i];
12088
12089 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12090 cmd) != 0)
12091 continue;
12092
James Smartc2017262019-01-28 11:14:37 -080012093 /* Guard against IO completion being called at same time */
12094 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12095 spin_lock(&lpfc_cmd->buf_lock);
12096
12097 if (!lpfc_cmd->pCmd) {
12098 spin_unlock(&lpfc_cmd->buf_lock);
12099 continue;
12100 }
12101
12102 if (phba->sli_rev == LPFC_SLI_REV4) {
12103 pring_s4 =
James Smartc00f62e2019-08-14 16:57:11 -070012104 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
James Smartc2017262019-01-28 11:14:37 -080012105 if (!pring_s4) {
12106 spin_unlock(&lpfc_cmd->buf_lock);
12107 continue;
12108 }
12109 /* Note: both hbalock and ring_lock must be set here */
12110 spin_lock(&pring_s4->ring_lock);
12111 }
12112
James Smart98912dda2014-04-04 13:52:31 -040012113 /*
12114 * If the iocbq is already being aborted, don't take a second
12115 * action, but do count it.
12116 */
James Smartc2017262019-01-28 11:14:37 -080012117 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
12118 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
12119 if (phba->sli_rev == LPFC_SLI_REV4)
12120 spin_unlock(&pring_s4->ring_lock);
12121 spin_unlock(&lpfc_cmd->buf_lock);
James Smart98912dda2014-04-04 13:52:31 -040012122 continue;
James Smartc2017262019-01-28 11:14:37 -080012123 }
James Smart98912dda2014-04-04 13:52:31 -040012124
12125 /* issue ABTS for this IOCB based on iotag */
12126 abtsiocbq = __lpfc_sli_get_iocbq(phba);
James Smartc2017262019-01-28 11:14:37 -080012127 if (!abtsiocbq) {
12128 if (phba->sli_rev == LPFC_SLI_REV4)
12129 spin_unlock(&pring_s4->ring_lock);
12130 spin_unlock(&lpfc_cmd->buf_lock);
James Smart98912dda2014-04-04 13:52:31 -040012131 continue;
James Smartc2017262019-01-28 11:14:37 -080012132 }
James Smart98912dda2014-04-04 13:52:31 -040012133
12134 icmd = &iocbq->iocb;
12135 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12136 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12137 if (phba->sli_rev == LPFC_SLI_REV4)
12138 abtsiocbq->iocb.un.acxri.abortIoTag =
12139 iocbq->sli4_xritag;
12140 else
12141 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12142 abtsiocbq->iocb.ulpLe = 1;
12143 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12144 abtsiocbq->vport = vport;
12145
12146 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080012147 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
James Smart98912dda2014-04-04 13:52:31 -040012148 if (iocbq->iocb_flag & LPFC_IO_FCP)
12149 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040012150 if (iocbq->iocb_flag & LPFC_IO_FOF)
12151 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
James Smart98912dda2014-04-04 13:52:31 -040012152
James Smart8c50d252014-09-03 12:58:16 -040012153 ndlp = lpfc_cmd->rdata->pnode;
12154
12155 if (lpfc_is_link_up(phba) &&
12156 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
James Smart98912dda2014-04-04 13:52:31 -040012157 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
12158 else
12159 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12160
12161 /* Setup callback routine and issue the command. */
12162 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
12163
12164 /*
12165 * Indicate the IO is being aborted by the driver and set
12166 * the caller's flag into the aborted IO.
12167 */
12168 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
12169
12170 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart98912dda2014-04-04 13:52:31 -040012171 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12172 abtsiocbq, 0);
James Smart59c68ea2018-04-09 14:24:25 -070012173 spin_unlock(&pring_s4->ring_lock);
James Smart98912dda2014-04-04 13:52:31 -040012174 } else {
12175 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12176 abtsiocbq, 0);
12177 }
12178
James Smartc2017262019-01-28 11:14:37 -080012179 spin_unlock(&lpfc_cmd->buf_lock);
James Smart98912dda2014-04-04 13:52:31 -040012180
12181 if (ret_val == IOCB_ERROR)
12182 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12183 else
12184 sum++;
12185 }
James Smart59c68ea2018-04-09 14:24:25 -070012186 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart98912dda2014-04-04 13:52:31 -040012187 return sum;
12188}
12189
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copy the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_io_buf *lpfc_cmd;

	/* hbalock serializes this completion against the waiter's
	 * timeout handling (LPFC_IO_WAKE_TMO) and wakeup state.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb.  If a time out
		 * completion handler has been supplied, call it.  Otherwise,
		 * just free the iocbq.
		 */

		/* Drop the lock before invoking the saved completion
		 * handler or releasing the iocbq; neither may be done
		 * while holding hbalock.
		 */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	/* Normal completion: flag the iocb as done and copy the response
	 * into the caller-supplied response iocb (context2), if any.
	 */
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
			cur_iocbq);
		/* Mirror the response's EXCHANGE_BUSY state into the
		 * io_buf so the exchange is not reused while busy.
		 */
		if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
		else
			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	}

	/* Wake the sleeper in lpfc_sli_issue_iocb_wait, still under
	 * hbalock so the wakeup cannot race the timeout path above.
	 */
	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
12257
James Smarte59058c2008-08-24 21:49:00 -040012258/**
James Smartd11e31d2009-06-10 17:23:06 -040012259 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12260 * @phba: Pointer to HBA context object..
12261 * @piocbq: Pointer to command iocb.
12262 * @flag: Flag to test.
12263 *
12264 * This routine grabs the hbalock and then test the iocb_flag to
12265 * see if the passed in flag is set.
12266 * Returns:
12267 * 1 if flag is set.
12268 * 0 if flag is not set.
12269 **/
12270static int
12271lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12272 struct lpfc_iocbq *piocbq, uint32_t flag)
12273{
12274 unsigned long iflags;
12275 int ret;
12276
12277 spin_lock_irqsave(&phba->hbalock, iflags);
12278 ret = piocbq->iocb_flag & flag;
12279 spin_unlock_irqrestore(&phba->hbalock, iflags);
12280 return ret;
12281
12282}
12283
12284/**
James Smart3621a712009-04-06 18:47:14 -040012285 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
James Smarte59058c2008-08-24 21:49:00 -040012286 * @phba: Pointer to HBA context object..
Lee Jones7af29d42020-07-21 17:41:31 +010012287 * @ring_number: Ring number
James Smarte59058c2008-08-24 21:49:00 -040012288 * @piocb: Pointer to command iocb.
12289 * @prspiocbq: Pointer to response iocb.
12290 * @timeout: Timeout in number of seconds.
12291 *
12292 * This function issues the iocb to firmware and waits for the
James Smart5a0916b2013-07-15 18:31:42 -040012293 * iocb to complete. The iocb_cmpl field of the shall be used
12294 * to handle iocbs which time out. If the field is NULL, the
12295 * function shall free the iocbq structure. If more clean up is
12296 * needed, the caller is expected to provide a completion function
12297 * that will provide the needed clean up. If the iocb command is
12298 * not completed within timeout seconds, the function will either
12299 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
12300 * completion function set in the iocb_cmpl field and then return
12301 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
12302 * resources if this function returns IOCB_TIMEDOUT.
James Smarte59058c2008-08-24 21:49:00 -040012303 * The function waits for the iocb completion using an
12304 * non-interruptible wait.
12305 * This function will sleep while waiting for iocb completion.
12306 * So, this function should not be called from any context which
12307 * does not allow sleeping. Due to the same reason, this function
12308 * cannot be called with interrupt disabled.
12309 * This function assumes that the iocb completions occur while
12310 * this function sleep. So, this function cannot be called from
12311 * the thread which process iocb completion for this ring.
12312 * This function clears the iocb_flag of the iocb object before
12313 * issuing the iocb and the iocb completion handler sets this
12314 * flag and wakes this thread when the iocb completes.
12315 * The contents of the response iocb will be copied to prspiocbq
12316 * by the completion handler when the command completes.
12317 * This function returns IOCB_SUCCESS when success.
12318 * This function is called with no lock held.
12319 **/
dea31012005-04-17 16:05:31 -050012320int
James Smart2e0fef82007-06-17 19:56:36 -050012321lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
James Smartda0436e2009-05-22 14:51:39 -040012322 uint32_t ring_number,
James Smart2e0fef82007-06-17 19:56:36 -050012323 struct lpfc_iocbq *piocb,
12324 struct lpfc_iocbq *prspiocbq,
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012325 uint32_t timeout)
dea31012005-04-17 16:05:31 -050012326{
Peter Zijlstra7259f0d2006-10-29 22:46:36 -080012327 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012328 long timeleft, timeout_req = 0;
12329 int retval = IOCB_SUCCESS;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050012330 uint32_t creg_val;
James Smart0e9bb8d2013-03-01 16:35:12 -050012331 struct lpfc_iocbq *iocb;
12332 int txq_cnt = 0;
12333 int txcmplq_cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080012334 struct lpfc_sli_ring *pring;
James Smart5a0916b2013-07-15 18:31:42 -040012335 unsigned long iflags;
12336 bool iocb_completed = true;
12337
James Smart895427b2017-02-12 13:52:30 -080012338 if (phba->sli_rev >= LPFC_SLI_REV4)
12339 pring = lpfc_sli4_calc_ring(phba, piocb);
12340 else
12341 pring = &phba->sli.sli3_ring[ring_number];
dea31012005-04-17 16:05:31 -050012342 /*
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012343 * If the caller has provided a response iocbq buffer, then context2
12344 * is NULL or its an error.
dea31012005-04-17 16:05:31 -050012345 */
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012346 if (prspiocbq) {
12347 if (piocb->context2)
12348 return IOCB_ERROR;
12349 piocb->context2 = prspiocbq;
dea31012005-04-17 16:05:31 -050012350 }
12351
James Smart5a0916b2013-07-15 18:31:42 -040012352 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012353 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12354 piocb->context_un.wait_queue = &done_q;
James Smart5a0916b2013-07-15 18:31:42 -040012355 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
dea31012005-04-17 16:05:31 -050012356
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050012357 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -050012358 if (lpfc_readl(phba->HCregaddr, &creg_val))
12359 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050012360 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12361 writel(creg_val, phba->HCregaddr);
12362 readl(phba->HCregaddr); /* flush */
12363 }
12364
James Smart2a9bf3d2010-06-07 15:24:45 -040012365 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12366 SLI_IOCB_RET_IOCB);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012367 if (retval == IOCB_SUCCESS) {
James Smart256ec0d2013-04-17 20:14:58 -040012368 timeout_req = msecs_to_jiffies(timeout * 1000);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012369 timeleft = wait_event_timeout(done_q,
James Smartd11e31d2009-06-10 17:23:06 -040012370 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012371 timeout_req);
James Smart5a0916b2013-07-15 18:31:42 -040012372 spin_lock_irqsave(&phba->hbalock, iflags);
12373 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
dea31012005-04-17 16:05:31 -050012374
James Smart5a0916b2013-07-15 18:31:42 -040012375 /*
12376 * IOCB timed out. Inform the wake iocb wait
12377 * completion function and set local status
12378 */
12379
12380 iocb_completed = false;
12381 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12382 }
12383 spin_unlock_irqrestore(&phba->hbalock, iflags);
12384 if (iocb_completed) {
James Smart7054a602007-04-25 09:52:34 -040012385 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040012386 "0331 IOCB wake signaled\n");
James Smart53151bb2013-10-10 12:24:07 -040012387 /* Note: we are not indicating if the IOCB has a success
12388 * status or not - that's for the caller to check.
12389 * IOCB_SUCCESS means just that the command was sent and
12390 * completed. Not that it completed successfully.
12391 * */
James Smart7054a602007-04-25 09:52:34 -040012392 } else if (timeleft == 0) {
Dick Kennedy372c1872020-06-30 14:50:00 -070012393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -040012394 "0338 IOCB wait timeout error - no "
12395 "wake response Data x%x\n", timeout);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012396 retval = IOCB_TIMEDOUT;
James Smart7054a602007-04-25 09:52:34 -040012397 } else {
Dick Kennedy372c1872020-06-30 14:50:00 -070012398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -040012399 "0330 IOCB wake NOT set, "
12400 "Data x%x x%lx\n",
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012401 timeout, (timeleft / jiffies));
12402 retval = IOCB_TIMEDOUT;
dea31012005-04-17 16:05:31 -050012403 }
James Smart2a9bf3d2010-06-07 15:24:45 -040012404 } else if (retval == IOCB_BUSY) {
James Smart0e9bb8d2013-03-01 16:35:12 -050012405 if (phba->cfg_log_verbose & LOG_SLI) {
12406 list_for_each_entry(iocb, &pring->txq, list) {
12407 txq_cnt++;
12408 }
12409 list_for_each_entry(iocb, &pring->txcmplq, list) {
12410 txcmplq_cnt++;
12411 }
12412 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12413 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12414 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12415 }
James Smart2a9bf3d2010-06-07 15:24:45 -040012416 return retval;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012417 } else {
12418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smartd7c255b2008-08-24 21:50:00 -040012419 "0332 IOCB wait issue failed, Data x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040012420 retval);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012421 retval = IOCB_ERROR;
dea31012005-04-17 16:05:31 -050012422 }
12423
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050012424 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -050012425 if (lpfc_readl(phba->HCregaddr, &creg_val))
12426 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050012427 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12428 writel(creg_val, phba->HCregaddr);
12429 readl(phba->HCregaddr); /* flush */
12430 }
12431
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012432 if (prspiocbq)
12433 piocb->context2 = NULL;
12434
12435 piocb->context_un.wait_queue = NULL;
12436 piocb->iocb_cmpl = NULL;
dea31012005-04-17 16:05:31 -050012437 return retval;
12438}
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040012439
James Smarte59058c2008-08-24 21:49:00 -040012440/**
James Smart3621a712009-04-06 18:47:14 -040012441 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
James Smarte59058c2008-08-24 21:49:00 -040012442 * @phba: Pointer to HBA context object.
12443 * @pmboxq: Pointer to driver mailbox object.
12444 * @timeout: Timeout in number of seconds.
12445 *
12446 * This function issues the mailbox to firmware and waits for the
12447 * mailbox command to complete. If the mailbox command is not
12448 * completed within timeout seconds, it returns MBX_TIMEOUT.
12449 * The function waits for the mailbox completion using an
12450 * interruptible wait. If the thread is woken up due to a
12451 * signal, MBX_TIMEOUT error is returned to the caller. Caller
12452 * should not free the mailbox resources, if this function returns
12453 * MBX_TIMEOUT.
12454 * This function will sleep while waiting for mailbox completion.
12455 * So, this function should not be called from any context which
12456 * does not allow sleeping. Due to the same reason, this function
12457 * cannot be called with interrupt disabled.
12458 * This function assumes that the mailbox completion occurs while
12459 * this function sleeps. So, this function cannot be called from
12460 * the worker thread which processes mailbox completion.
12461 * This function is called in the context of HBA management
12462 * applications.
12463 * This function returns MBX_SUCCESS when successful.
12464 * This function is called with no lock held.
12465 **/
dea31012005-04-17 16:05:31 -050012466int
James Smart2e0fef82007-06-17 19:56:36 -050012467lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea31012005-04-17 16:05:31 -050012468 uint32_t timeout)
12469{
James Smarte29d74f2018-03-05 12:04:07 -080012470 struct completion mbox_done;
dea31012005-04-17 16:05:31 -050012471 int retval;
James Smart858c9f62007-06-17 19:56:39 -050012472 unsigned long flag;
dea31012005-04-17 16:05:31 -050012473
James Smart495a7142008-06-14 22:52:59 -040012474 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea31012005-04-17 16:05:31 -050012475 /* setup wake call as IOCB callback */
12476 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
dea31012005-04-17 16:05:31 -050012477
James Smarte29d74f2018-03-05 12:04:07 -080012478 /* setup context3 field to pass wait_queue pointer to wake function */
12479 init_completion(&mbox_done);
12480 pmboxq->context3 = &mbox_done;
dea31012005-04-17 16:05:31 -050012481 /* now issue the command */
12482 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea31012005-04-17 16:05:31 -050012483 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
James Smarte29d74f2018-03-05 12:04:07 -080012484 wait_for_completion_timeout(&mbox_done,
12485 msecs_to_jiffies(timeout * 1000));
James Smart7054a602007-04-25 09:52:34 -040012486
James Smart858c9f62007-06-17 19:56:39 -050012487 spin_lock_irqsave(&phba->hbalock, flag);
James Smarte29d74f2018-03-05 12:04:07 -080012488 pmboxq->context3 = NULL;
James Smart7054a602007-04-25 09:52:34 -040012489 /*
12490 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12491 * else do not free the resources.
12492 */
James Smartd7c47992010-06-08 18:31:54 -040012493 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea31012005-04-17 16:05:31 -050012494 retval = MBX_SUCCESS;
James Smartd7c47992010-06-08 18:31:54 -040012495 } else {
James Smart7054a602007-04-25 09:52:34 -040012496 retval = MBX_TIMEOUT;
James Smart858c9f62007-06-17 19:56:39 -050012497 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12498 }
12499 spin_unlock_irqrestore(&phba->hbalock, flag);
dea31012005-04-17 16:05:31 -050012500 }
dea31012005-04-17 16:05:31 -050012501 return retval;
12502}
12503
James Smarte59058c2008-08-24 21:49:00 -040012504/**
James Smart3772a992009-05-22 14:50:54 -040012505 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
James Smarte59058c2008-08-24 21:49:00 -040012506 * @phba: Pointer to HBA context.
Lee Jones7af29d42020-07-21 17:41:31 +010012507 * @mbx_action: Mailbox shutdown options.
James Smarte59058c2008-08-24 21:49:00 -040012508 *
James Smart3772a992009-05-22 14:50:54 -040012509 * This function is called to shutdown the driver's mailbox sub-system.
12510 * It first marks the mailbox sub-system is in a block state to prevent
12511 * the asynchronous mailbox command from issued off the pending mailbox
12512 * command queue. If the mailbox command sub-system shutdown is due to
12513 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12514 * the mailbox sub-system flush routine to forcefully bring down the
12515 * mailbox sub-system. Otherwise, if it is due to normal condition (such
12516 * as with offline or HBA function reset), this routine will wait for the
12517 * outstanding mailbox command to complete before invoking the mailbox
12518 * sub-system flush routine to gracefully bring down mailbox sub-system.
James Smarte59058c2008-08-24 21:49:00 -040012519 **/
James Smart3772a992009-05-22 14:50:54 -040012520void
James Smart618a5232012-06-12 13:54:36 -040012521lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
James Smartb4c02652006-07-06 15:50:43 -040012522{
James Smart3772a992009-05-22 14:50:54 -040012523 struct lpfc_sli *psli = &phba->sli;
James Smart3772a992009-05-22 14:50:54 -040012524 unsigned long timeout;
12525
James Smart618a5232012-06-12 13:54:36 -040012526 if (mbx_action == LPFC_MBX_NO_WAIT) {
12527 /* delay 100ms for port state */
12528 msleep(100);
12529 lpfc_sli_mbox_sys_flush(phba);
12530 return;
12531 }
James Smarta183a152011-10-10 21:32:43 -040012532 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
James Smartd7069f02012-03-01 22:36:29 -050012533
James Smart523128e2018-09-10 10:30:46 -070012534 /* Disable softirqs, including timers from obtaining phba->hbalock */
12535 local_bh_disable();
12536
James Smart3772a992009-05-22 14:50:54 -040012537 spin_lock_irq(&phba->hbalock);
12538 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
James Smart3772a992009-05-22 14:50:54 -040012539
12540 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
James Smart3772a992009-05-22 14:50:54 -040012541 /* Determine how long we might wait for the active mailbox
12542 * command to be gracefully completed by firmware.
12543 */
James Smarta183a152011-10-10 21:32:43 -040012544 if (phba->sli.mbox_active)
12545 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12546 phba->sli.mbox_active) *
12547 1000) + jiffies;
12548 spin_unlock_irq(&phba->hbalock);
12549
James Smart523128e2018-09-10 10:30:46 -070012550 /* Enable softirqs again, done with phba->hbalock */
12551 local_bh_enable();
12552
James Smart3772a992009-05-22 14:50:54 -040012553 while (phba->sli.mbox_active) {
12554 /* Check active mailbox complete status every 2ms */
12555 msleep(2);
12556 if (time_after(jiffies, timeout))
12557 /* Timeout, let the mailbox flush routine to
12558 * forcefully release active mailbox command
12559 */
12560 break;
12561 }
James Smart523128e2018-09-10 10:30:46 -070012562 } else {
James Smartd7069f02012-03-01 22:36:29 -050012563 spin_unlock_irq(&phba->hbalock);
12564
James Smart523128e2018-09-10 10:30:46 -070012565 /* Enable softirqs again, done with phba->hbalock */
12566 local_bh_enable();
12567 }
12568
James Smart3772a992009-05-22 14:50:54 -040012569 lpfc_sli_mbox_sys_flush(phba);
12570}
12571
12572/**
12573 * lpfc_sli_eratt_read - read sli-3 error attention events
12574 * @phba: Pointer to HBA context.
12575 *
12576 * This function is called to read the SLI3 device error attention registers
12577 * for possible error attention events. The caller must hold the hostlock
12578 * with spin_lock_irq().
12579 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030012580 * This function returns 1 when there is Error Attention in the Host Attention
James Smart3772a992009-05-22 14:50:54 -040012581 * Register and returns 0 otherwise.
12582 **/
12583static int
12584lpfc_sli_eratt_read(struct lpfc_hba *phba)
12585{
James Smarted957682007-06-17 19:56:37 -050012586 uint32_t ha_copy;
James Smartb4c02652006-07-06 15:50:43 -040012587
James Smart3772a992009-05-22 14:50:54 -040012588 /* Read chip Host Attention (HA) register */
James Smart9940b972011-03-11 16:06:12 -050012589 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12590 goto unplug_err;
12591
James Smart3772a992009-05-22 14:50:54 -040012592 if (ha_copy & HA_ERATT) {
12593 /* Read host status register to retrieve error event */
James Smart9940b972011-03-11 16:06:12 -050012594 if (lpfc_sli_read_hs(phba))
12595 goto unplug_err;
James Smartb4c02652006-07-06 15:50:43 -040012596
James Smart3772a992009-05-22 14:50:54 -040012597 /* Check if there is a deferred error condition is active */
12598 if ((HS_FFER1 & phba->work_hs) &&
12599 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -040012600 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
James Smart3772a992009-05-22 14:50:54 -040012601 phba->hba_flag |= DEFER_ERATT;
James Smart3772a992009-05-22 14:50:54 -040012602 /* Clear all interrupt enable conditions */
12603 writel(0, phba->HCregaddr);
12604 readl(phba->HCregaddr);
12605 }
12606
12607 /* Set the driver HA work bitmap */
James Smart3772a992009-05-22 14:50:54 -040012608 phba->work_ha |= HA_ERATT;
12609 /* Indicate polling handles this ERATT */
12610 phba->hba_flag |= HBA_ERATT_HANDLED;
James Smart3772a992009-05-22 14:50:54 -040012611 return 1;
James Smartb4c02652006-07-06 15:50:43 -040012612 }
James Smart3772a992009-05-22 14:50:54 -040012613 return 0;
James Smart9940b972011-03-11 16:06:12 -050012614
12615unplug_err:
12616 /* Set the driver HS work bitmap */
12617 phba->work_hs |= UNPLUG_ERR;
12618 /* Set the driver HA work bitmap */
12619 phba->work_ha |= HA_ERATT;
12620 /* Indicate polling handles this ERATT */
12621 phba->hba_flag |= HBA_ERATT_HANDLED;
12622 return 1;
James Smartb4c02652006-07-06 15:50:43 -040012623}
12624
James Smarte59058c2008-08-24 21:49:00 -040012625/**
James Smartda0436e2009-05-22 14:51:39 -040012626 * lpfc_sli4_eratt_read - read sli-4 error attention events
12627 * @phba: Pointer to HBA context.
12628 *
12629 * This function is called to read the SLI4 device error attention registers
12630 * for possible error attention events. The caller must hold the hostlock
12631 * with spin_lock_irq().
12632 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030012633 * This function returns 1 when there is Error Attention in the Host Attention
James Smartda0436e2009-05-22 14:51:39 -040012634 * Register and returns 0 otherwise.
12635 **/
12636static int
12637lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12638{
12639 uint32_t uerr_sta_hi, uerr_sta_lo;
James Smart2fcee4b2010-12-15 17:57:46 -050012640 uint32_t if_type, portsmphr;
12641 struct lpfc_register portstat_reg;
James Smartda0436e2009-05-22 14:51:39 -040012642
James Smart2fcee4b2010-12-15 17:57:46 -050012643 /*
12644 * For now, use the SLI4 device internal unrecoverable error
James Smartda0436e2009-05-22 14:51:39 -040012645 * registers for error attention. This can be changed later.
12646 */
James Smart2fcee4b2010-12-15 17:57:46 -050012647 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12648 switch (if_type) {
12649 case LPFC_SLI_INTF_IF_TYPE_0:
James Smart9940b972011-03-11 16:06:12 -050012650 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12651 &uerr_sta_lo) ||
12652 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12653 &uerr_sta_hi)) {
12654 phba->work_hs |= UNPLUG_ERR;
12655 phba->work_ha |= HA_ERATT;
12656 phba->hba_flag |= HBA_ERATT_HANDLED;
12657 return 1;
12658 }
James Smart2fcee4b2010-12-15 17:57:46 -050012659 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12660 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
Dick Kennedy372c1872020-06-30 14:50:00 -070012661 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -050012662 "1423 HBA Unrecoverable error: "
12663 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12664 "ue_mask_lo_reg=0x%x, "
12665 "ue_mask_hi_reg=0x%x\n",
12666 uerr_sta_lo, uerr_sta_hi,
12667 phba->sli4_hba.ue_mask_lo,
12668 phba->sli4_hba.ue_mask_hi);
12669 phba->work_status[0] = uerr_sta_lo;
12670 phba->work_status[1] = uerr_sta_hi;
12671 phba->work_ha |= HA_ERATT;
12672 phba->hba_flag |= HBA_ERATT_HANDLED;
12673 return 1;
12674 }
12675 break;
12676 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart27d6ac02018-02-22 08:18:42 -080012677 case LPFC_SLI_INTF_IF_TYPE_6:
James Smart9940b972011-03-11 16:06:12 -050012678 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12679 &portstat_reg.word0) ||
12680 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12681 &portsmphr)){
12682 phba->work_hs |= UNPLUG_ERR;
12683 phba->work_ha |= HA_ERATT;
12684 phba->hba_flag |= HBA_ERATT_HANDLED;
12685 return 1;
12686 }
James Smart2fcee4b2010-12-15 17:57:46 -050012687 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12688 phba->work_status[0] =
12689 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12690 phba->work_status[1] =
12691 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
Dick Kennedy372c1872020-06-30 14:50:00 -070012692 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2e90f4b2011-12-13 13:22:37 -050012693 "2885 Port Status Event: "
James Smart2fcee4b2010-12-15 17:57:46 -050012694 "port status reg 0x%x, "
12695 "port smphr reg 0x%x, "
12696 "error 1=0x%x, error 2=0x%x\n",
12697 portstat_reg.word0,
12698 portsmphr,
12699 phba->work_status[0],
12700 phba->work_status[1]);
12701 phba->work_ha |= HA_ERATT;
12702 phba->hba_flag |= HBA_ERATT_HANDLED;
12703 return 1;
12704 }
12705 break;
12706 case LPFC_SLI_INTF_IF_TYPE_1:
12707 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070012708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -050012709 "2886 HBA Error Attention on unsupported "
12710 "if type %d.", if_type);
James Smarta747c9c2009-11-18 15:41:10 -050012711 return 1;
James Smartda0436e2009-05-22 14:51:39 -040012712 }
James Smart2fcee4b2010-12-15 17:57:46 -050012713
James Smartda0436e2009-05-22 14:51:39 -040012714 return 0;
12715}
12716
12717/**
James Smart3621a712009-04-06 18:47:14 -040012718 * lpfc_sli_check_eratt - check error attention events
James Smart93996272008-08-24 21:50:30 -040012719 * @phba: Pointer to HBA context.
12720 *
James Smart3772a992009-05-22 14:50:54 -040012721 * This function is called from timer soft interrupt context to check HBA's
James Smart93996272008-08-24 21:50:30 -040012722 * error attention register bit for error attention events.
12723 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030012724 * This function returns 1 when there is Error Attention in the Host Attention
James Smart93996272008-08-24 21:50:30 -040012725 * Register and returns 0 otherwise.
12726 **/
12727int
12728lpfc_sli_check_eratt(struct lpfc_hba *phba)
12729{
12730 uint32_t ha_copy;
12731
12732 /* If somebody is waiting to handle an eratt, don't process it
12733 * here. The brdkill function will do this.
12734 */
12735 if (phba->link_flag & LS_IGNORE_ERATT)
12736 return 0;
12737
12738 /* Check if interrupt handler handles this ERATT */
12739 spin_lock_irq(&phba->hbalock);
12740 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12741 /* Interrupt handler has handled ERATT */
12742 spin_unlock_irq(&phba->hbalock);
12743 return 0;
12744 }
12745
James Smarta257bf92009-04-06 18:48:10 -040012746 /*
12747 * If there is deferred error attention, do not check for error
12748 * attention
12749 */
12750 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12751 spin_unlock_irq(&phba->hbalock);
12752 return 0;
12753 }
12754
James Smart3772a992009-05-22 14:50:54 -040012755 /* If PCI channel is offline, don't process it */
12756 if (unlikely(pci_channel_offline(phba->pcidev))) {
James Smart93996272008-08-24 21:50:30 -040012757 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -040012758 return 0;
12759 }
12760
12761 switch (phba->sli_rev) {
12762 case LPFC_SLI_REV2:
12763 case LPFC_SLI_REV3:
12764 /* Read chip Host Attention (HA) register */
12765 ha_copy = lpfc_sli_eratt_read(phba);
12766 break;
James Smartda0436e2009-05-22 14:51:39 -040012767 case LPFC_SLI_REV4:
James Smart2fcee4b2010-12-15 17:57:46 -050012768 /* Read device Uncoverable Error (UERR) registers */
James Smartda0436e2009-05-22 14:51:39 -040012769 ha_copy = lpfc_sli4_eratt_read(phba);
12770 break;
James Smart3772a992009-05-22 14:50:54 -040012771 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070012772 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040012773 "0299 Invalid SLI revision (%d)\n",
12774 phba->sli_rev);
12775 ha_copy = 0;
12776 break;
James Smart93996272008-08-24 21:50:30 -040012777 }
12778 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -040012779
12780 return ha_copy;
12781}
12782
12783/**
12784 * lpfc_intr_state_check - Check device state for interrupt handling
12785 * @phba: Pointer to HBA context.
12786 *
12787 * This inline routine checks whether a device or its PCI slot is in a state
12788 * that the interrupt should be handled.
12789 *
12790 * This function returns 0 if the device or the PCI slot is in a state that
12791 * interrupt should be handled, otherwise -EIO.
12792 */
12793static inline int
12794lpfc_intr_state_check(struct lpfc_hba *phba)
12795{
12796 /* If the pci channel is offline, ignore all the interrupts */
12797 if (unlikely(pci_channel_offline(phba->pcidev)))
12798 return -EIO;
12799
12800 /* Update device level interrupt statistics */
12801 phba->sli.slistat.sli_intr++;
12802
12803 /* Ignore all interrupts during initialization. */
12804 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12805 return -EIO;
12806
James Smart93996272008-08-24 21:50:30 -040012807 return 0;
12808}
12809
12810/**
James Smart3772a992009-05-22 14:50:54 -040012811 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
James Smarte59058c2008-08-24 21:49:00 -040012812 * @irq: Interrupt number.
12813 * @dev_id: The device context pointer.
12814 *
James Smart93996272008-08-24 21:50:30 -040012815 * This function is directly called from the PCI layer as an interrupt
James Smart3772a992009-05-22 14:50:54 -040012816 * service routine when device with SLI-3 interface spec is enabled with
12817 * MSI-X multi-message interrupt mode and there are slow-path events in
12818 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12819 * interrupt mode, this function is called as part of the device-level
12820 * interrupt handler. When the PCI slot is in error recovery or the HBA
12821 * is undergoing initialization, the interrupt handler will not process
12822 * the interrupt. The link attention and ELS ring attention events are
12823 * handled by the worker thread. The interrupt handler signals the worker
12824 * thread and returns for these events. This function is called without
12825 * any lock held. It gets the hbalock to access and update SLI data
James Smart93996272008-08-24 21:50:30 -040012826 * structures.
12827 *
12828 * This function returns IRQ_HANDLED when interrupt is handled else it
12829 * returns IRQ_NONE.
James Smarte59058c2008-08-24 21:49:00 -040012830 **/
dea31012005-04-17 16:05:31 -050012831irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040012832lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea31012005-04-17 16:05:31 -050012833{
James Smart2e0fef82007-06-17 19:56:36 -050012834 struct lpfc_hba *phba;
James Smarta747c9c2009-11-18 15:41:10 -050012835 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -050012836 uint32_t work_ha_copy;
12837 unsigned long status;
James Smart5b75da22008-12-04 22:39:35 -050012838 unsigned long iflag;
dea31012005-04-17 16:05:31 -050012839 uint32_t control;
12840
James Smart92d7f7b2007-06-17 19:56:38 -050012841 MAILBOX_t *mbox, *pmbox;
James Smart858c9f62007-06-17 19:56:39 -050012842 struct lpfc_vport *vport;
12843 struct lpfc_nodelist *ndlp;
12844 struct lpfc_dmabuf *mp;
James Smart92d7f7b2007-06-17 19:56:38 -050012845 LPFC_MBOXQ_t *pmb;
12846 int rc;
12847
dea31012005-04-17 16:05:31 -050012848 /*
12849 * Get the driver's phba structure from the dev_id and
12850 * assume the HBA is not interrupting.
12851 */
James Smart93996272008-08-24 21:50:30 -040012852 phba = (struct lpfc_hba *)dev_id;
dea31012005-04-17 16:05:31 -050012853
12854 if (unlikely(!phba))
12855 return IRQ_NONE;
12856
dea31012005-04-17 16:05:31 -050012857 /*
James Smart93996272008-08-24 21:50:30 -040012858 * Stuff needs to be attended to when this function is invoked as an
12859 * individual interrupt handler in MSI-X multi-message interrupt mode
dea31012005-04-17 16:05:31 -050012860 */
James Smart93996272008-08-24 21:50:30 -040012861 if (phba->intr_type == MSIX) {
James Smart3772a992009-05-22 14:50:54 -040012862 /* Check device state for handling interrupt */
12863 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040012864 return IRQ_NONE;
12865 /* Need to read HA REG for slow-path events */
James Smart5b75da22008-12-04 22:39:35 -050012866 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -050012867 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12868 goto unplug_error;
James Smart93996272008-08-24 21:50:30 -040012869 /* If somebody is waiting to handle an eratt don't process it
12870 * here. The brdkill function will do this.
12871 */
12872 if (phba->link_flag & LS_IGNORE_ERATT)
12873 ha_copy &= ~HA_ERATT;
12874 /* Check the need for handling ERATT in interrupt handler */
12875 if (ha_copy & HA_ERATT) {
12876 if (phba->hba_flag & HBA_ERATT_HANDLED)
12877 /* ERATT polling has handled ERATT */
12878 ha_copy &= ~HA_ERATT;
12879 else
12880 /* Indicate interrupt handler handles ERATT */
12881 phba->hba_flag |= HBA_ERATT_HANDLED;
12882 }
James Smarta257bf92009-04-06 18:48:10 -040012883
12884 /*
12885 * If there is deferred error attention, do not check for any
12886 * interrupt.
12887 */
12888 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
James Smart3772a992009-05-22 14:50:54 -040012889 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040012890 return IRQ_NONE;
12891 }
12892
James Smart93996272008-08-24 21:50:30 -040012893 /* Clear up only attention source related to slow-path */
James Smart9940b972011-03-11 16:06:12 -050012894 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12895 goto unplug_error;
12896
James Smarta747c9c2009-11-18 15:41:10 -050012897 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12898 HC_LAINT_ENA | HC_ERINT_ENA),
12899 phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040012900 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12901 phba->HAregaddr);
James Smarta747c9c2009-11-18 15:41:10 -050012902 writel(hc_copy, phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040012903 readl(phba->HAregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050012904 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040012905 } else
12906 ha_copy = phba->ha_copy;
dea31012005-04-17 16:05:31 -050012907
dea31012005-04-17 16:05:31 -050012908 work_ha_copy = ha_copy & phba->work_ha_mask;
12909
James Smart93996272008-08-24 21:50:30 -040012910 if (work_ha_copy) {
dea31012005-04-17 16:05:31 -050012911 if (work_ha_copy & HA_LATT) {
12912 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12913 /*
12914 * Turn off Link Attention interrupts
12915 * until CLEAR_LA done
12916 */
James Smart5b75da22008-12-04 22:39:35 -050012917 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050012918 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
James Smart9940b972011-03-11 16:06:12 -050012919 if (lpfc_readl(phba->HCregaddr, &control))
12920 goto unplug_error;
dea31012005-04-17 16:05:31 -050012921 control &= ~HC_LAINT_ENA;
12922 writel(control, phba->HCregaddr);
12923 readl(phba->HCregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050012924 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050012925 }
12926 else
12927 work_ha_copy &= ~HA_LATT;
12928 }
12929
James Smart93996272008-08-24 21:50:30 -040012930 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
James Smart858c9f62007-06-17 19:56:39 -050012931 /*
12932 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12933 * the only slow ring.
12934 */
12935 status = (work_ha_copy &
12936 (HA_RXMASK << (4*LPFC_ELS_RING)));
12937 status >>= (4*LPFC_ELS_RING);
12938 if (status & HA_RXMASK) {
James Smart5b75da22008-12-04 22:39:35 -050012939 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -050012940 if (lpfc_readl(phba->HCregaddr, &control))
12941 goto unplug_error;
James Smarta58cbd52007-08-02 11:09:43 -040012942
12943 lpfc_debugfs_slow_ring_trc(phba,
12944 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12945 control, status,
12946 (uint32_t)phba->sli.slistat.sli_intr);
12947
James Smart858c9f62007-06-17 19:56:39 -050012948 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
James Smarta58cbd52007-08-02 11:09:43 -040012949 lpfc_debugfs_slow_ring_trc(phba,
12950 "ISR Disable ring:"
12951 "pwork:x%x hawork:x%x wait:x%x",
12952 phba->work_ha, work_ha_copy,
12953 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040012954 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040012955
James Smart858c9f62007-06-17 19:56:39 -050012956 control &=
12957 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea31012005-04-17 16:05:31 -050012958 writel(control, phba->HCregaddr);
12959 readl(phba->HCregaddr); /* flush */
dea31012005-04-17 16:05:31 -050012960 }
James Smarta58cbd52007-08-02 11:09:43 -040012961 else {
12962 lpfc_debugfs_slow_ring_trc(phba,
12963 "ISR slow ring: pwork:"
12964 "x%x hawork:x%x wait:x%x",
12965 phba->work_ha, work_ha_copy,
12966 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040012967 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040012968 }
James Smart5b75da22008-12-04 22:39:35 -050012969 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050012970 }
12971 }
James Smart5b75da22008-12-04 22:39:35 -050012972 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040012973 if (work_ha_copy & HA_ERATT) {
James Smart9940b972011-03-11 16:06:12 -050012974 if (lpfc_sli_read_hs(phba))
12975 goto unplug_error;
James Smarta257bf92009-04-06 18:48:10 -040012976 /*
12977 * Check if there is a deferred error condition
12978 * is active
12979 */
12980 if ((HS_FFER1 & phba->work_hs) &&
12981 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -040012982 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12983 phba->work_hs)) {
James Smarta257bf92009-04-06 18:48:10 -040012984 phba->hba_flag |= DEFER_ERATT;
12985 /* Clear all interrupt enable conditions */
12986 writel(0, phba->HCregaddr);
12987 readl(phba->HCregaddr);
12988 }
12989 }
12990
James Smart93996272008-08-24 21:50:30 -040012991 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
James Smart92d7f7b2007-06-17 19:56:38 -050012992 pmb = phba->sli.mbox_active;
James Smart04c68492009-05-22 14:52:52 -040012993 pmbox = &pmb->u.mb;
James Smart34b02dc2008-08-24 21:49:55 -040012994 mbox = phba->mbox;
James Smart858c9f62007-06-17 19:56:39 -050012995 vport = pmb->vport;
James Smart92d7f7b2007-06-17 19:56:38 -050012996
12997 /* First check out the status word */
12998 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12999 if (pmbox->mbxOwner != OWN_HOST) {
James Smart5b75da22008-12-04 22:39:35 -050013000 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -050013001 /*
13002 * Stray Mailbox Interrupt, mbxCommand <cmd>
13003 * mbxStatus <status>
13004 */
Dick Kennedy372c1872020-06-30 14:50:00 -070013005 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -040013006 "(%d):0304 Stray Mailbox "
James Smart92d7f7b2007-06-17 19:56:38 -050013007 "Interrupt mbxCommand x%x "
13008 "mbxStatus x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040013009 (vport ? vport->vpi : 0),
James Smart92d7f7b2007-06-17 19:56:38 -050013010 pmbox->mbxCommand,
13011 pmbox->mbxStatus);
James Smart09372822008-01-11 01:52:54 -050013012 /* clear mailbox attention bit */
13013 work_ha_copy &= ~HA_MBATT;
13014 } else {
James Smart97eab632008-04-07 10:16:05 -040013015 phba->sli.mbox_active = NULL;
James Smart5b75da22008-12-04 22:39:35 -050013016 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart09372822008-01-11 01:52:54 -050013017 phba->last_completion_time = jiffies;
13018 del_timer(&phba->sli.mbox_tmo);
James Smart09372822008-01-11 01:52:54 -050013019 if (pmb->mbox_cmpl) {
13020 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13021 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -040013022 if (pmb->out_ext_byte_len &&
James Smart3e1f0712018-11-29 16:09:29 -080013023 pmb->ctx_buf)
James Smart7a470272010-03-15 11:25:20 -040013024 lpfc_sli_pcimem_bcopy(
13025 phba->mbox_ext,
James Smart3e1f0712018-11-29 16:09:29 -080013026 pmb->ctx_buf,
James Smart7a470272010-03-15 11:25:20 -040013027 pmb->out_ext_byte_len);
James Smart858c9f62007-06-17 19:56:39 -050013028 }
James Smart09372822008-01-11 01:52:54 -050013029 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13030 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13031
13032 lpfc_debugfs_disc_trc(vport,
13033 LPFC_DISC_TRC_MBOX_VPORT,
13034 "MBOX dflt rpi: : "
13035 "status:x%x rpi:x%x",
13036 (uint32_t)pmbox->mbxStatus,
13037 pmbox->un.varWords[0], 0);
13038
13039 if (!pmbox->mbxStatus) {
13040 mp = (struct lpfc_dmabuf *)
James Smart3e1f0712018-11-29 16:09:29 -080013041 (pmb->ctx_buf);
James Smart09372822008-01-11 01:52:54 -050013042 ndlp = (struct lpfc_nodelist *)
James Smart3e1f0712018-11-29 16:09:29 -080013043 pmb->ctx_ndlp;
James Smart09372822008-01-11 01:52:54 -050013044
13045 /* Reg_LOGIN of dflt RPI was
13046 * successful. new lets get
13047 * rid of the RPI using the
13048 * same mbox buffer.
13049 */
13050 lpfc_unreg_login(phba,
13051 vport->vpi,
13052 pmbox->un.varWords[0],
13053 pmb);
13054 pmb->mbox_cmpl =
13055 lpfc_mbx_cmpl_dflt_rpi;
James Smart3e1f0712018-11-29 16:09:29 -080013056 pmb->ctx_buf = mp;
13057 pmb->ctx_ndlp = ndlp;
James Smart09372822008-01-11 01:52:54 -050013058 pmb->vport = vport;
James Smart58da1ff2008-04-07 10:15:56 -040013059 rc = lpfc_sli_issue_mbox(phba,
13060 pmb,
13061 MBX_NOWAIT);
13062 if (rc != MBX_BUSY)
13063 lpfc_printf_log(phba,
13064 KERN_ERR,
Dick Kennedy372c1872020-06-30 14:50:00 -070013065 LOG_TRACE_EVENT,
James Smartd7c255b2008-08-24 21:50:00 -040013066 "0350 rc should have"
James Smart6a9c52c2009-10-02 15:16:51 -040013067 "been MBX_BUSY\n");
James Smart3772a992009-05-22 14:50:54 -040013068 if (rc != MBX_NOT_FINISHED)
13069 goto send_current_mbox;
James Smart09372822008-01-11 01:52:54 -050013070 }
13071 }
James Smart5b75da22008-12-04 22:39:35 -050013072 spin_lock_irqsave(
13073 &phba->pport->work_port_lock,
13074 iflag);
James Smart09372822008-01-11 01:52:54 -050013075 phba->pport->work_port_events &=
13076 ~WORKER_MBOX_TMO;
James Smart5b75da22008-12-04 22:39:35 -050013077 spin_unlock_irqrestore(
13078 &phba->pport->work_port_lock,
13079 iflag);
James Smarta22d73b2021-01-04 10:02:38 -080013080
13081 /* Do NOT queue MBX_HEARTBEAT to the worker
13082 * thread for processing.
13083 */
13084 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13085 /* Process mbox now */
13086 phba->sli.mbox_active = NULL;
13087 phba->sli.sli_flag &=
13088 ~LPFC_SLI_MBOX_ACTIVE;
13089 if (pmb->mbox_cmpl)
13090 pmb->mbox_cmpl(phba, pmb);
13091 } else {
13092 /* Queue to worker thread to process */
13093 lpfc_mbox_cmpl_put(phba, pmb);
13094 }
James Smart858c9f62007-06-17 19:56:39 -050013095 }
James Smart97eab632008-04-07 10:16:05 -040013096 } else
James Smart5b75da22008-12-04 22:39:35 -050013097 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040013098
James Smart92d7f7b2007-06-17 19:56:38 -050013099 if ((work_ha_copy & HA_MBATT) &&
13100 (phba->sli.mbox_active == NULL)) {
James Smart858c9f62007-06-17 19:56:39 -050013101send_current_mbox:
James Smart92d7f7b2007-06-17 19:56:38 -050013102 /* Process next mailbox command if there is one */
James Smart58da1ff2008-04-07 10:15:56 -040013103 do {
13104 rc = lpfc_sli_issue_mbox(phba, NULL,
13105 MBX_NOWAIT);
13106 } while (rc == MBX_NOT_FINISHED);
13107 if (rc != MBX_SUCCESS)
Dick Kennedy372c1872020-06-30 14:50:00 -070013108 lpfc_printf_log(phba, KERN_ERR,
13109 LOG_TRACE_EVENT,
13110 "0349 rc should be "
James Smart6a9c52c2009-10-02 15:16:51 -040013111 "MBX_SUCCESS\n");
James Smart92d7f7b2007-06-17 19:56:38 -050013112 }
13113
James Smart5b75da22008-12-04 22:39:35 -050013114 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050013115 phba->work_ha |= work_ha_copy;
James Smart5b75da22008-12-04 22:39:35 -050013116 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart5e9d9b82008-06-14 22:52:53 -040013117 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -050013118 }
James Smart93996272008-08-24 21:50:30 -040013119 return IRQ_HANDLED;
James Smart9940b972011-03-11 16:06:12 -050013120unplug_error:
13121 spin_unlock_irqrestore(&phba->hbalock, iflag);
13122 return IRQ_HANDLED;
dea31012005-04-17 16:05:31 -050013123
James Smart3772a992009-05-22 14:50:54 -040013124} /* lpfc_sli_sp_intr_handler */
James Smart93996272008-08-24 21:50:30 -040013125
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (a struct lpfc_hba *).
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring event are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;		/* snapshot of the Host Attention reg */
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode:
	 * in that mode nobody has read/cleared HA for us, so do it here.
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events.
		 * A failed readl suggests the board is no longer there
		 * (see the unplug handling in the slow-path handler);
		 * claim the interrupt and bail.
		 */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		/* Ack only ring-0/ring-1 attention bits back to the HBA */
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		/* MSI / INTx: device-level handler already captured HA */
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	/* Extract the 4-bit attention nibble for the FCP ring */
	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */
dea31012005-04-17 16:05:31 -050013216
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (a struct lpfc_hba *).
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;	/* slow-/fast-path results */
	unsigned long status1, status2;
	uint32_t hc_copy;			/* saved Host Control reg */

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	/* Capture the Host Attention register under hbalock; a failed
	 * readl suggests the adapter is gone, so claim the interrupt.
	 */
	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		/* No attention bits set - not our interrupt */
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	/* Mask all interrupt-enable bits while acking, then restore HC */
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invokes slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
James Smart4f774512009-05-22 14:52:35 -040013333
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the els xri abort events.  The list lock is
	 * dropped around each event's processing so the handler may run
	 * without it; new events may be queued concurrently and will be
	 * picked up by the next list_empty() check.
	 */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
				       iflags);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);

		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		/* Re-take the lock before testing the list again */
		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
				  iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
}
13369
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	/* Bulk-copy everything from the iocb member onward; the fields
	 * before 'iocb' in struct lpfc_iocbq are left untouched.
	 */
	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	/* NOTE: the inner if/else binds to the inner if (dangling else);
	 * for FCP I/O without IOSTAT_FCP_RSP_ERROR, ulpWord[4] gets the
	 * raw WCQE parameter.
	 */
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			/* Residual = requested transfer - data placed */
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		/* Determine the largest response this command could have
		 * received, based on its BDE list, and clamp bdeSize to it.
		 */
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			/* ELS: response size is in the second BDE */
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl  = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			/* GEN: sum the sizes of all non-64-bit-data BDEs */
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG (BlockGuard/DIF) errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
13485
13486/**
James Smart45ed1192009-10-02 15:17:02 -040013487 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13488 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +010013489 * @irspiocbq: Pointer to work-queue completion queue entry.
James Smart45ed1192009-10-02 15:17:02 -040013490 *
13491 * This routine handles an ELS work-queue completion event and construct
13492 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
13493 * discovery engine to handle.
13494 *
13495 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13496 **/
13497static struct lpfc_iocbq *
13498lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13499 struct lpfc_iocbq *irspiocbq)
13500{
James Smart895427b2017-02-12 13:52:30 -080013501 struct lpfc_sli_ring *pring;
James Smart45ed1192009-10-02 15:17:02 -040013502 struct lpfc_iocbq *cmdiocbq;
13503 struct lpfc_wcqe_complete *wcqe;
13504 unsigned long iflags;
13505
James Smart895427b2017-02-12 13:52:30 -080013506 pring = lpfc_phba_elsring(phba);
Dick Kennedy1234a6d2017-09-29 17:34:29 -070013507 if (unlikely(!pring))
13508 return NULL;
James Smart895427b2017-02-12 13:52:30 -080013509
James Smart45ed1192009-10-02 15:17:02 -040013510 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
James Smart45ed1192009-10-02 15:17:02 -040013511 pring->stats.iocb_event++;
13512 /* Look up the ELS command IOCB and create pseudo response IOCB */
13513 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13514 bf_get(lpfc_wcqe_c_request_tag, wcqe));
James Smart45ed1192009-10-02 15:17:02 -040013515 if (unlikely(!cmdiocbq)) {
13516 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13517 "0386 ELS complete with no corresponding "
Dick Kennedy401bb412017-09-29 17:34:28 -070013518 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13519 wcqe->word0, wcqe->total_data_placed,
13520 wcqe->parameter, wcqe->word3);
James Smart45ed1192009-10-02 15:17:02 -040013521 lpfc_sli_release_iocbq(phba, irspiocbq);
13522 return NULL;
13523 }
13524
James Smarte2a8be52019-05-06 17:26:47 -070013525 spin_lock_irqsave(&pring->ring_lock, iflags);
Dick Kennedy401bb412017-09-29 17:34:28 -070013526 /* Put the iocb back on the txcmplq */
13527 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13528 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13529
James Smart45ed1192009-10-02 15:17:02 -040013530 /* Fake the irspiocbq and copy necessary response information */
James Smart341af102010-01-26 23:07:37 -050013531 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
James Smart45ed1192009-10-02 15:17:02 -040013532
13533 return irspiocbq;
13534}
13535
James Smart8a5ca102017-11-20 16:00:30 -080013536inline struct lpfc_cq_event *
13537lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13538{
13539 struct lpfc_cq_event *cq_event;
13540
13541 /* Allocate a new internal CQ_EVENT entry */
13542 cq_event = lpfc_sli4_cq_event_alloc(phba);
13543 if (!cq_event) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013544 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart8a5ca102017-11-20 16:00:30 -080013545 "0602 Failed to alloc CQ_EVENT entry\n");
13546 return NULL;
13547 }
13548
13549 /* Move the CQE into the event */
13550 memcpy(&cq_event->cqe, entry, size);
13551 return cq_event;
13552}
13553
James Smart45ed1192009-10-02 15:17:02 -040013554/**
Colin Ian King291c2542019-12-18 08:43:01 +000013555 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
James Smart04c68492009-05-22 14:52:52 -040013556 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +010013557 * @mcqe: Pointer to mailbox completion queue entry.
James Smart04c68492009-05-22 14:52:52 -040013558 *
Colin Ian King291c2542019-12-18 08:43:01 +000013559 * This routine process a mailbox completion queue entry with asynchronous
James Smart04c68492009-05-22 14:52:52 -040013560 * event.
13561 *
13562 * Return: true if work posted to worker thread, otherwise false.
13563 **/
13564static bool
13565lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13566{
13567 struct lpfc_cq_event *cq_event;
13568 unsigned long iflags;
13569
13570 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13571 "0392 Async Event: word0:x%x, word1:x%x, "
13572 "word2:x%x, word3:x%x\n", mcqe->word0,
13573 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13574
James Smart8a5ca102017-11-20 16:00:30 -080013575 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13576 if (!cq_event)
James Smart04c68492009-05-22 14:52:52 -040013577 return false;
James Smarte7dab162020-10-20 13:27:12 -070013578
13579 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
James Smart04c68492009-05-22 14:52:52 -040013580 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
James Smarte7dab162020-10-20 13:27:12 -070013581 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13582
James Smart04c68492009-05-22 14:52:52 -040013583 /* Set the async event flag */
James Smarte7dab162020-10-20 13:27:12 -070013584 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart04c68492009-05-22 14:52:52 -040013585 phba->hba_flag |= ASYNC_EVENT;
13586 spin_unlock_irqrestore(&phba->hbalock, iflags);
13587
13588 return true;
13589}
13590
13591/**
13592 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13593 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +010013594 * @mcqe: Pointer to mailbox completion queue entry.
James Smart04c68492009-05-22 14:52:52 -040013595 *
13596 * This routine process a mailbox completion queue entry with mailbox
13597 * completion event.
13598 *
13599 * Return: true if work posted to worker thread, otherwise false.
13600 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command (under hbalock, since
	 * mbox_active is shared with the mailbox issue/timeout paths).
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	/* LPFC_MBX_IMED_UNREG: a default-RPI REG_LOGIN that must be
	 * immediately followed by an UNREG_LOGIN for the same RPI.
	 */
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

			/* Reg_LOGIN of dflt RPI was successful. Mark the
			 * node as having an UNREG_LOGIN in progress to stop
			 * an unsolicited PLOGI from the same NPortId from
			 * starting another mailbox transaction.
			 */
			spin_lock_irqsave(&ndlp->lock, iflags);
			ndlp->nlp_flag |= NLP_UNREG_INP;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			/* Reuse this same mailbox job for the UNREG_LOGIN */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;

			/* No reference taken here. This is a default
			 * RPI reg/immediate unreg cycle. The reference was
			 * taken in the reg rpi path and is released when
			 * this mailbox completes.
			 */
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			/* The mailbox slot is still active, so MBX_BUSY is
			 * the expected result of the re-issue.
			 */
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		/* Release the mailbox command posting token */
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		if (bf_get(lpfc_trailer_consumed, mcqe))
			lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
		spin_unlock_irqrestore(&phba->hbalock, iflags);

		/* Post the next mbox command, if there is one */
		lpfc_sli4_post_async_mbox(phba);

		/* Process cmpl now, inline, rather than via the worker */
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
		return false;
	}

	/* There is mailbox completion work to queue to the worker thread */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	/* Hardware consumed the entry without a completion to deliver;
	 * still release the MQ slot so the queue does not stall.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return false;
}
13742
13743/**
13744 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13745 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +010013746 * @cq: Pointer to associated CQ
James Smart04c68492009-05-22 14:52:52 -040013747 * @cqe: Pointer to mailbox completion queue entry.
13748 *
13749 * This routine process a mailbox completion queue entry, it invokes the
Colin Ian King291c2542019-12-18 08:43:01 +000013750 * proper mailbox complete handling or asynchronous event handling routine
James Smart04c68492009-05-22 14:52:52 -040013751 * according to the MCQE's async bit.
13752 *
13753 * Return: true if work posted to worker thread, otherwise false.
13754 **/
13755static bool
James Smart32517fc2019-01-28 11:14:33 -080013756lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13757 struct lpfc_cqe *cqe)
James Smart04c68492009-05-22 14:52:52 -040013758{
13759 struct lpfc_mcqe mcqe;
13760 bool workposted;
13761
James Smart32517fc2019-01-28 11:14:33 -080013762 cq->CQ_mbox++;
13763
James Smart04c68492009-05-22 14:52:52 -040013764 /* Copy the mailbox MCQE and convert endian order as needed */
James Smart48f8fdb2018-05-04 20:37:51 -070013765 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
James Smart04c68492009-05-22 14:52:52 -040013766
13767 /* Invoke the proper event handling routine */
13768 if (!bf_get(lpfc_trailer_async, &mcqe))
13769 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13770 else
13771 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13772 return workposted;
13773}
13774
13775/**
James Smart4f774512009-05-22 14:52:35 -040013776 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13777 * @phba: Pointer to HBA context object.
James Smart2a76a282012-08-03 12:35:54 -040013778 * @cq: Pointer to associated CQ
James Smart4f774512009-05-22 14:52:35 -040013779 * @wcqe: Pointer to work-queue completion queue entry.
13780 *
13781 * This routine handles an ELS work-queue completion event.
13782 *
13783 * Return: true if work posted to worker thread, otherwise false.
13784 **/
13785static bool
James Smart2a76a282012-08-03 12:35:54 -040013786lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040013787 struct lpfc_wcqe_complete *wcqe)
13788{
James Smart4f774512009-05-22 14:52:35 -040013789 struct lpfc_iocbq *irspiocbq;
13790 unsigned long iflags;
James Smart2a76a282012-08-03 12:35:54 -040013791 struct lpfc_sli_ring *pring = cq->pring;
James Smart0e9bb8d2013-03-01 16:35:12 -050013792 int txq_cnt = 0;
13793 int txcmplq_cnt = 0;
James Smart4f774512009-05-22 14:52:35 -040013794
James Smart11f0e342018-05-04 20:37:57 -070013795 /* Check for response status */
13796 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13797 /* Log the error status */
13798 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13799 "0357 ELS CQE error: status=x%x: "
13800 "CQE: %08x %08x %08x %08x\n",
13801 bf_get(lpfc_wcqe_c_status, wcqe),
13802 wcqe->word0, wcqe->total_data_placed,
13803 wcqe->parameter, wcqe->word3);
13804 }
13805
James Smart45ed1192009-10-02 15:17:02 -040013806 /* Get an irspiocbq for later ELS response processing use */
James Smart4f774512009-05-22 14:52:35 -040013807 irspiocbq = lpfc_sli_get_iocbq(phba);
13808 if (!irspiocbq) {
James Smart0e9bb8d2013-03-01 16:35:12 -050013809 if (!list_empty(&pring->txq))
13810 txq_cnt++;
13811 if (!list_empty(&pring->txcmplq))
13812 txcmplq_cnt++;
Dick Kennedy372c1872020-06-30 14:50:00 -070013813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2a9bf3d2010-06-07 15:24:45 -040013814 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
James Smartff349bc2019-09-21 20:59:05 -070013815 "els_txcmplq_cnt=%d\n",
James Smart0e9bb8d2013-03-01 16:35:12 -050013816 txq_cnt, phba->iocb_cnt,
James Smart0e9bb8d2013-03-01 16:35:12 -050013817 txcmplq_cnt);
James Smart45ed1192009-10-02 15:17:02 -040013818 return false;
James Smart4f774512009-05-22 14:52:35 -040013819 }
James Smart4f774512009-05-22 14:52:35 -040013820
James Smart45ed1192009-10-02 15:17:02 -040013821 /* Save off the slow-path queue event for work thread to process */
13822 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
James Smart4f774512009-05-22 14:52:35 -040013823 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart4d9ab992009-10-02 15:16:39 -040013824 list_add_tail(&irspiocbq->cq_event.list,
James Smart45ed1192009-10-02 15:17:02 -040013825 &phba->sli4_hba.sp_queue_event);
13826 phba->hba_flag |= HBA_SP_QUEUE_EVT;
James Smart4f774512009-05-22 14:52:35 -040013827 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart4f774512009-05-22 14:52:35 -040013828
James Smart45ed1192009-10-02 15:17:02 -040013829 return true;
James Smart4f774512009-05-22 14:52:35 -040013830}
13831
13832/**
13833 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13834 * @phba: Pointer to HBA context object.
13835 * @wcqe: Pointer to work-queue completion queue entry.
13836 *
Masahiro Yamada3f8b6fb2017-02-27 14:29:25 -080013837 * This routine handles slow-path WQ entry consumed event by invoking the
James Smart4f774512009-05-22 14:52:35 -040013838 * proper WQ release routine to the slow-path WQ.
13839 **/
13840static void
13841lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13842 struct lpfc_wcqe_release *wcqe)
13843{
James Smart2e90f4b2011-12-13 13:22:37 -050013844 /* sanity check on queue memory */
13845 if (unlikely(!phba->sli4_hba.els_wq))
13846 return;
James Smart4f774512009-05-22 14:52:35 -040013847 /* Check for the slow-path ELS work queue */
13848 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13849 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13850 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13851 else
13852 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13853 "2579 Slow-path wqe consume event carries "
13854 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13855 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13856 phba->sli4_hba.els_wq->queue_id);
13857}
13858
13859/**
13860 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13861 * @phba: Pointer to HBA context object.
13862 * @cq: Pointer to a WQ completion queue.
13863 * @wcqe: Pointer to work-queue completion queue entry.
13864 *
13865 * This routine handles an XRI abort event.
13866 *
13867 * Return: true if work posted to worker thread, otherwise false.
13868 **/
13869static bool
13870lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13871 struct lpfc_queue *cq,
13872 struct sli4_wcqe_xri_aborted *wcqe)
13873{
13874 bool workposted = false;
13875 struct lpfc_cq_event *cq_event;
13876 unsigned long iflags;
13877
James Smart4f774512009-05-22 14:52:35 -040013878 switch (cq->subtype) {
James Smartc00f62e2019-08-14 16:57:11 -070013879 case LPFC_IO:
13880 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13881 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13882 /* Notify aborted XRI for NVME work queue */
13883 if (phba->nvmet_support)
13884 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13885 }
James Smart5e5b5112019-01-28 11:14:22 -080013886 workposted = false;
James Smart4f774512009-05-22 14:52:35 -040013887 break;
James Smart422c4cb2017-11-20 16:00:32 -080013888 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
James Smart4f774512009-05-22 14:52:35 -040013889 case LPFC_ELS:
James Smarte7dab162020-10-20 13:27:12 -070013890 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13891 if (!cq_event) {
13892 workposted = false;
13893 break;
13894 }
James Smart5e5b5112019-01-28 11:14:22 -080013895 cq_event->hdwq = cq->hdwq;
James Smarte7dab162020-10-20 13:27:12 -070013896 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13897 iflags);
James Smart4f774512009-05-22 14:52:35 -040013898 list_add_tail(&cq_event->list,
13899 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13900 /* Set the els xri abort event flag */
13901 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
James Smarte7dab162020-10-20 13:27:12 -070013902 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13903 iflags);
James Smart4f774512009-05-22 14:52:35 -040013904 workposted = true;
13905 break;
13906 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070013907 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart318083a2017-03-04 09:30:30 -080013908 "0603 Invalid CQ subtype %d: "
13909 "%08x %08x %08x %08x\n",
13910 cq->subtype, wcqe->word0, wcqe->parameter,
13911 wcqe->word2, wcqe->word3);
James Smart4f774512009-05-22 14:52:35 -040013912 workposted = false;
13913 break;
13914 }
13915 return workposted;
13916}
13917
James Smarte817e5d2018-12-13 15:17:53 -080013918#define FC_RCTL_MDS_DIAGS 0xF4
13919
/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine process a receive-queue completion queue entry. Received
 * frames are either handled inline (MDS diagnostic loopback) or queued
 * on the slow-path event list for the worker thread. Buffer-exhaustion
 * statuses request a buffer repost via the worker thread.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	/* The RQ id field moved between CQE versions */
	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2537 Receive Frame Truncated!!\n");
		/* Truncated frames are still consumed like successes */
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		/* Keep the RCQE with the buffer for the worker thread */
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_sli4_handle_mds_loopback(phba->pport,
							      dma_buf);
			else
				lpfc_in_buf_free(phba, &dma_buf->dbuf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		/* Frame discarded for lack of buffers; for NVMET targets,
		 * log the outstanding command counters for diagnosis.
		 */
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
14019
14020/**
14021 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14022 * @phba: Pointer to HBA context object.
14023 * @cq: Pointer to the completion queue.
James Smart32517fc2019-01-28 11:14:33 -080014024 * @cqe: Pointer to a completion queue entry.
James Smart4d9ab992009-10-02 15:16:39 -040014025 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030014026 * This routine process a slow-path work-queue or receive queue completion queue
James Smart4d9ab992009-10-02 15:16:39 -040014027 * entry.
14028 *
14029 * Return: true if work posted to worker thread, otherwise false.
14030 **/
14031static bool
14032lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040014033 struct lpfc_cqe *cqe)
14034{
James Smart45ed1192009-10-02 15:17:02 -040014035 struct lpfc_cqe cqevt;
James Smart4f774512009-05-22 14:52:35 -040014036 bool workposted = false;
14037
14038 /* Copy the work queue CQE and convert endian order if needed */
James Smart48f8fdb2018-05-04 20:37:51 -070014039 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
James Smart4f774512009-05-22 14:52:35 -040014040
14041 /* Check and process for different type of WCQE and dispatch */
James Smart45ed1192009-10-02 15:17:02 -040014042 switch (bf_get(lpfc_cqe_code, &cqevt)) {
James Smart4f774512009-05-22 14:52:35 -040014043 case CQE_CODE_COMPL_WQE:
James Smart45ed1192009-10-02 15:17:02 -040014044 /* Process the WQ/RQ complete event */
James Smartbc739052010-08-04 16:11:18 -040014045 phba->last_completion_time = jiffies;
James Smart2a76a282012-08-03 12:35:54 -040014046 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
James Smart45ed1192009-10-02 15:17:02 -040014047 (struct lpfc_wcqe_complete *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040014048 break;
14049 case CQE_CODE_RELEASE_WQE:
14050 /* Process the WQ release event */
14051 lpfc_sli4_sp_handle_rel_wcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040014052 (struct lpfc_wcqe_release *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040014053 break;
14054 case CQE_CODE_XRI_ABORTED:
14055 /* Process the WQ XRI abort event */
James Smartbc739052010-08-04 16:11:18 -040014056 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040014057 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
James Smart45ed1192009-10-02 15:17:02 -040014058 (struct sli4_wcqe_xri_aborted *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040014059 break;
James Smart4d9ab992009-10-02 15:16:39 -040014060 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -040014061 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -040014062 /* Process the RQ event */
James Smartbc739052010-08-04 16:11:18 -040014063 phba->last_completion_time = jiffies;
James Smart4d9ab992009-10-02 15:16:39 -040014064 workposted = lpfc_sli4_sp_handle_rcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040014065 (struct lpfc_rcqe *)&cqevt);
James Smart4d9ab992009-10-02 15:16:39 -040014066 break;
James Smart4f774512009-05-22 14:52:35 -040014067 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070014068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040014069 "0388 Not a valid WCQE code: x%x\n",
James Smart45ed1192009-10-02 15:17:02 -040014070 bf_get(lpfc_cqe_code, &cqevt));
James Smart4f774512009-05-22 14:52:35 -040014071 break;
14072 }
14073 return workposted;
14074}
14075
14076/**
James Smart4f774512009-05-22 14:52:35 -040014077 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14078 * @phba: Pointer to HBA context object.
14079 * @eqe: Pointer to fast-path event queue entry.
Lee Jones7af29d42020-07-21 17:41:31 +010014080 * @speq: Pointer to slow-path event queue.
James Smart4f774512009-05-22 14:52:35 -040014081 *
14082 * This routine process a event queue entry from the slow-path event queue.
14083 * It will check the MajorCode and MinorCode to determine this is for a
14084 * completion event on a completion queue, if not, an error shall be logged
14085 * and just return. Otherwise, it will get to the corresponding completion
14086 * queue and process all the entries on that completion queue, rearm the
14087 * completion queue, and then return.
14088 *
14089 **/
Dick Kennedyf485c182017-09-29 17:34:34 -070014090static void
James Smart67d12732012-08-03 12:36:13 -040014091lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14092 struct lpfc_queue *speq)
James Smart4f774512009-05-22 14:52:35 -040014093{
James Smart67d12732012-08-03 12:36:13 -040014094 struct lpfc_queue *cq = NULL, *childq;
James Smart4f774512009-05-22 14:52:35 -040014095 uint16_t cqid;
Dick Kennedy86ee57a2020-06-30 14:49:55 -070014096 int ret = 0;
James Smart4f774512009-05-22 14:52:35 -040014097
James Smart4f774512009-05-22 14:52:35 -040014098 /* Get the reference to the corresponding CQ */
James Smartcb5172e2010-03-15 11:25:07 -040014099 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
James Smart4f774512009-05-22 14:52:35 -040014100
James Smart4f774512009-05-22 14:52:35 -040014101 list_for_each_entry(childq, &speq->child_list, list) {
14102 if (childq->queue_id == cqid) {
14103 cq = childq;
14104 break;
14105 }
14106 }
14107 if (unlikely(!cq)) {
James Smart75baf692010-06-08 18:31:21 -040014108 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
Dick Kennedy372c1872020-06-30 14:50:00 -070014109 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart75baf692010-06-08 18:31:21 -040014110 "0365 Slow-path CQ identifier "
14111 "(%d) does not exist\n", cqid);
Dick Kennedyf485c182017-09-29 17:34:34 -070014112 return;
James Smart4f774512009-05-22 14:52:35 -040014113 }
14114
James Smart895427b2017-02-12 13:52:30 -080014115 /* Save EQ associated with this CQ */
14116 cq->assoc_qp = speq;
14117
Dick Kennedy86ee57a2020-06-30 14:49:55 -070014118 if (is_kdump_kernel())
14119 ret = queue_work(phba->wq, &cq->spwork);
14120 else
14121 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14122
14123 if (!ret)
Dick Kennedy372c1872020-06-30 14:50:00 -070014124 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Dick Kennedy317aeb82020-06-30 14:49:59 -070014125 "0390 Cannot schedule queue work "
Dick Kennedyf485c182017-09-29 17:34:34 -070014126 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
Bart Van Assched6d189c2019-03-28 11:06:22 -070014127 cqid, cq->queue_id, raw_smp_processor_id());
Dick Kennedyf485c182017-09-29 17:34:34 -070014128}
14129
/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed
 * @handler: Routine to process each cqe
 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
 * @poll_mode: Polling mode we were called from
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set. A delay of 0
 * indicates no rescheduling.
 *
 * Returns True if work scheduled, False otherwise.
 **/
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
		       bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
			struct lpfc_cqe *), unsigned long *delay,
		       enum lpfc_poll_mode poll_mode)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	/* Claim the CQ; if another context already owns it, just make
	 * sure the doorbell is written (consumed == 0, arm == true).
	 */
	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cq->q_flag = 0;
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		/* Stop after max_proc_limit entries to bound time here */
		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		/* Periodic NOARM doorbell lets hardware reuse consumed
		 * entries before the loop finishes.
		 */
		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
						LPFC_QUEUE_NOARM);
			consumed = 0;
			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
		}

		if (count == LPFC_NVMET_CQ_NOTIFY)
			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;

		cqe = lpfc_sli4_cq_get(cq);
	}
	/* Heavy load: reschedule instead of rearming for an interrupt */
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Note: complete the irq_poll softirq before rearming CQ */
	if (poll_mode == LPFC_IRQ_POLL)
		irq_poll_complete(&cq->iop);

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	/* Release the claim before the final doorbell write */
	xchg(&cq->queue_claimed, 0);

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}
14220
14221/**
Lee Jones8514e2f2021-03-03 14:46:18 +000014222 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
James Smart32517fc2019-01-28 11:14:33 -080014223 * @cq: pointer to CQ to process
14224 *
14225 * This routine calls the cq processing routine with a handler specific
14226 * to the type of queue bound to it.
14227 *
14228 * The CQ routine returns two values: the first is the calling status,
14229 * which indicates whether work was queued to the background discovery
14230 * thread. If true, the routine should wakeup the discovery thread;
14231 * the second is the delay parameter. If non-zero, rather than rearming
14232 * the CQ and yet another interrupt, the CQ handler should be queued so
14233 * that it is processed in a subsequent polling action. The value of
14234 * the delay indicates when to reschedule it.
14235 **/
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret = 0;

	/* Process and rearm the CQ, dispatching to the CQE handler that
	 * matches the queue type.  __lpfc_sli4_process_cq returns true when
	 * work was queued for the discovery thread and writes a reschedule
	 * delay through &delay (NOTE(review): assumed always written before
	 * any break below — confirm in __lpfc_sli4_process_cq).
	 */
	switch (cq->type) {
	case LPFC_MCQ:	/* mailbox completions */
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay, LPFC_QUEUE_WORK);
		break;
	case LPFC_WCQ:	/* work queue completions */
		if (cq->subtype == LPFC_IO)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay, LPFC_QUEUE_WORK);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay, LPFC_QUEUE_WORK);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Non-zero delay: do not rearm now; reprocess this CQ from the
	 * slow-path delayed work item after 'delay' jiffies.  kdump kernels
	 * avoid the per-CPU variant.
	 */
	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
						delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						&cq->sched_spwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0394 Cannot schedule queue work "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
14286
14287/**
James Smart32517fc2019-01-28 11:14:33 -080014288 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14289 * interrupt
14290 * @work: pointer to work element
14291 *
14292 * translates from the work handler and calls the slow-path handler.
14293 **/
14294static void
14295lpfc_sli4_sp_process_cq(struct work_struct *work)
14296{
14297 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14298
14299 __lpfc_sli4_sp_process_cq(cq);
14300}
14301
14302/**
14303 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14304 * @work: pointer to work element
14305 *
14306 * translates from the work handler and calls the slow-path handler.
14307 **/
14308static void
14309lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14310{
14311 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14312 struct lpfc_queue, sched_spwork);
14313
14314 __lpfc_sli4_sp_process_cq(cq);
14315}
14316
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.  The completed
 * command IOCB is looked up by request tag and its completion callback is
 * invoked — either the newer wqe_cmpl (passed the raw WCQE) or the legacy
 * iocb_cmpl (passed a faked response IOCB).
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;	/* stack-local pseudo response */
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the cmpl status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE cmpl: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB.
	 * Only the stats bump is done under the ring lock; the tag lookup
	 * itself runs unlocked (NOTE(review): lookup safety presumably relies
	 * on the lookup-by-tag implementation — confirm).
	 */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Propagate the ISR timestamp for latency accounting */
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			/* For FCP the flag is cleared in wqe_cmpl */
			if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
			    cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		/* Neither completion callback is set: nothing we can call */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Only SLI4 non-IO commands still use IOCB */
	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
14405
14406/**
14407 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14408 * @phba: Pointer to HBA context object.
14409 * @cq: Pointer to completion queue.
14410 * @wcqe: Pointer to work-queue completion queue entry.
14411 *
Masahiro Yamada3f8b6fb2017-02-27 14:29:25 -080014412 * This routine handles an fast-path WQ entry consumed event by invoking the
James Smart4f774512009-05-22 14:52:35 -040014413 * proper WQ release routine to the slow-path WQ.
14414 **/
14415static void
14416lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14417 struct lpfc_wcqe_release *wcqe)
14418{
14419 struct lpfc_queue *childwq;
14420 bool wqid_matched = false;
James Smart895427b2017-02-12 13:52:30 -080014421 uint16_t hba_wqid;
James Smart4f774512009-05-22 14:52:35 -040014422
14423 /* Check for fast-path FCP work queue release */
James Smart895427b2017-02-12 13:52:30 -080014424 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
James Smart4f774512009-05-22 14:52:35 -040014425 list_for_each_entry(childwq, &cq->child_list, list) {
James Smart895427b2017-02-12 13:52:30 -080014426 if (childwq->queue_id == hba_wqid) {
James Smart4f774512009-05-22 14:52:35 -040014427 lpfc_sli4_wq_release(childwq,
14428 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
James Smart6e8e1c12018-01-30 15:58:49 -080014429 if (childwq->q_flag & HBA_NVMET_WQFULL)
14430 lpfc_nvmet_wqfull_process(phba, childwq);
James Smart4f774512009-05-22 14:52:35 -040014431 wqid_matched = true;
14432 break;
14433 }
14434 }
14435 /* Report warning log message if no match found */
14436 if (wqid_matched != true)
14437 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14438 "2580 Fast-path wqe consume event carries "
James Smart895427b2017-02-12 13:52:30 -080014439 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
James Smart4f774512009-05-22 14:52:35 -040014440}
14441
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry for the
 * NVMET multi-receive queues.  On a successful receive of an FCP frame the
 * buffer is handed to lpfc_nvmet_unsol_fcp_event() (which takes ownership);
 * otherwise the buffer is freed or posted-buffer counters are adjusted.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;		/* header receive queue */
	struct lpfc_queue *drq;		/* data receive queue */
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	/* Ignore entirely when NVMET is not configured */
	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	/* Map this CQ to its paired header/data MRQ pair */
	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	/* RQ id field location differs between CQE versions */
	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	/* The RCQE must belong to this pair's header RQ */
	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6126 Receive Frame Truncated!!\n");
		/* truncated frames are still consumed below */
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		/* hbalock protects RQ release and buffer-list removal */
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			/* Buffer ownership transfers to the NVMET layer */
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		/* Non-FCP or malformed frame: return buffer to the pool */
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}
14549
/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes one fast-path completion queue entry: the CQE is
 * copied out of the queue (with endian conversion as needed) and dispatched
 * on its code field to the WQ-complete, WQ-release, XRI-abort, or NVMET
 * receive handler.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;	/* local copy; recast per CQE code */
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
14611
14612/**
Dick Kennedy317aeb82020-06-30 14:49:59 -070014613 * lpfc_sli4_sched_cq_work - Schedules cq work
14614 * @phba: Pointer to HBA context object.
14615 * @cq: Pointer to CQ
14616 * @cqid: CQ ID
14617 *
14618 * This routine checks the poll mode of the CQ corresponding to
14619 * cq->chann, then either schedules a softirq or queue_work to complete
14620 * cq work.
14621 *
14622 * queue_work path is taken if in NVMET mode, or if poll_mode is in
14623 * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
14624 *
14625 **/
14626static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14627 struct lpfc_queue *cq, uint16_t cqid)
14628{
14629 int ret = 0;
14630
14631 switch (cq->poll_mode) {
14632 case LPFC_IRQ_POLL:
14633 irq_poll_sched(&cq->iop);
14634 break;
14635 case LPFC_QUEUE_WORK:
14636 default:
14637 if (is_kdump_kernel())
14638 ret = queue_work(phba->wq, &cq->irqwork);
14639 else
14640 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14641 if (!ret)
Dick Kennedy372c1872020-06-30 14:50:00 -070014642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Dick Kennedy317aeb82020-06-30 14:49:59 -070014643 "0383 Cannot schedule queue work "
14644 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14645 cqid, cq->queue_id,
14646 raw_smp_processor_id());
14647 }
14648}
14649
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue structure.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue.  It checks the MajorCode to confirm this is a completion event,
 * resolves the CQ id to a CQ structure (fast lookup table first, then the
 * NVMET CQ set, then the NVME LS CQ, otherwise falling back to the
 * slow-path handler), and schedules that CQ for processing.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	/* Only major code 0 is a completion event */
	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	/* Defensive: the resolved CQ must really carry the EQE's cqid */
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	/* Record when the interrupt saw this CQ, for latency tracing */
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	lpfc_sli4_sched_cq_work(phba, cq, cqid);
}
14731
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
 * @cq: Pointer to CQ to be processed
 * @poll_mode: Enum lpfc_poll_state to determine poll mode
 *
 * This routine calls the cq processing routine with the handler for
 * fast path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wakeup the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
			   enum lpfc_poll_mode poll_mode)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret = 0;

	/* process and rearm the CQ; delay is written by the callee */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay, poll_mode);

	/* Non-zero delay: defer further processing to the delayed irqwork
	 * item instead of rearming now (kdump avoids the per-CPU variant).
	 */
	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
						delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						&cq->sched_irqwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0367 Cannot schedule queue work "
					"for cqid=%d on CPU %d\n",
					cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
14779
James Smart1ba981f2014-02-20 09:56:45 -050014780/**
James Smart32517fc2019-01-28 11:14:33 -080014781 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14782 * interrupt
14783 * @work: pointer to work element
James Smart1ba981f2014-02-20 09:56:45 -050014784 *
James Smart32517fc2019-01-28 11:14:33 -080014785 * translates from the work handler and calls the fast-path handler.
James Smart1ba981f2014-02-20 09:56:45 -050014786 **/
14787static void
James Smart32517fc2019-01-28 11:14:33 -080014788lpfc_sli4_hba_process_cq(struct work_struct *work)
James Smart1ba981f2014-02-20 09:56:45 -050014789{
James Smart32517fc2019-01-28 11:14:33 -080014790 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
James Smart1ba981f2014-02-20 09:56:45 -050014791
Dick Kennedy317aeb82020-06-30 14:49:59 -070014792 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
James Smart1ba981f2014-02-20 09:56:45 -050014793}
14794
14795/**
Lee Jones8514e2f2021-03-03 14:46:18 +000014796 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
James Smart32517fc2019-01-28 11:14:33 -080014797 * @work: pointer to work element
James Smart1ba981f2014-02-20 09:56:45 -050014798 *
James Smart32517fc2019-01-28 11:14:33 -080014799 * translates from the work handler and calls the fast-path handler.
James Smart1ba981f2014-02-20 09:56:45 -050014800 **/
James Smart32517fc2019-01-28 11:14:33 -080014801static void
14802lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
James Smart1ba981f2014-02-20 09:56:45 -050014803{
James Smart32517fc2019-01-28 11:14:33 -080014804 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14805 struct lpfc_queue, sched_irqwork);
James Smart1ba981f2014-02-20 09:56:45 -050014806
Dick Kennedy317aeb82020-06-30 14:49:59 -070014807 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
James Smart1ba981f2014-02-20 09:56:45 -050014808}
14809
/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring event are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that,
 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
 * equal to that of FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eqcq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/* Per-CPU interrupt counter, used for EQ-delay auto tuning below */
	eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
	eqi->icnt++;

	fpeq->last_cpu = raw_smp_processor_id();

	/* High interrupt rate: bump this EQ's coalescing delay to maximum */
	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */
James Smart4f774512009-05-22 14:52:35 -040014901
14902/**
14903 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14904 * @irq: Interrupt number.
14905 * @dev_id: The device context pointer.
14906 *
14907 * This function is the device-level interrupt handler to device with SLI-4
14908 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14909 * interrupt mode is enabled and there is an event in the HBA which requires
14910 * driver attention. This function invokes the slow-path interrupt attention
14911 * handling function and fast-path interrupt attention handling function in
14912 * turn to process the relevant HBA attention events. This function is called
14913 * without any lock held. It gets the hbalock to access and update SLI data
14914 * structures.
14915 *
14916 * This function returns IRQ_HANDLED when interrupt is handled, else it
14917 * returns IRQ_NONE.
14918 **/
14919irqreturn_t
14920lpfc_sli4_intr_handler(int irq, void *dev_id)
14921{
14922 struct lpfc_hba *phba;
James Smart67d12732012-08-03 12:36:13 -040014923 irqreturn_t hba_irq_rc;
14924 bool hba_handled = false;
James Smart895427b2017-02-12 13:52:30 -080014925 int qidx;
James Smart4f774512009-05-22 14:52:35 -040014926
14927 /* Get the driver's phba structure from the dev_id */
14928 phba = (struct lpfc_hba *)dev_id;
14929
14930 if (unlikely(!phba))
14931 return IRQ_NONE;
14932
14933 /*
James Smart4f774512009-05-22 14:52:35 -040014934 * Invoke fast-path host attention interrupt handling as appropriate.
14935 */
James Smart6a828b02019-01-28 11:14:31 -080014936 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
James Smart67d12732012-08-03 12:36:13 -040014937 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
James Smart895427b2017-02-12 13:52:30 -080014938 &phba->sli4_hba.hba_eq_hdl[qidx]);
James Smart67d12732012-08-03 12:36:13 -040014939 if (hba_irq_rc == IRQ_HANDLED)
14940 hba_handled |= true;
James Smart4f774512009-05-22 14:52:35 -040014941 }
14942
James Smart67d12732012-08-03 12:36:13 -040014943 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040014944} /* lpfc_sli4_intr_handler */
14945
James Smart93a4d6f2019-11-04 16:57:05 -080014946void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14947{
14948 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14949 struct lpfc_queue *eq;
14950 int i = 0;
14951
14952 rcu_read_lock();
14953
14954 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14955 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14956 if (!list_empty(&phba->poll_list))
14957 mod_timer(&phba->cpuhp_poll_timer,
14958 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14959
14960 rcu_read_unlock();
14961}
14962
14963inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14964{
14965 struct lpfc_hba *phba = eq->phba;
14966 int i = 0;
14967
14968 /*
14969 * Unlocking an irq is one of the entry point to check
14970 * for re-schedule, but we are good for io submission
14971 * path as midlayer does a get_cpu to glue us in. Flush
14972 * out the invalidate queue so we can see the updated
14973 * value for flag.
14974 */
14975 smp_rmb();
14976
14977 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14978 /* We will not likely get the completion for the caller
14979 * during this iteration but i guess that's fine.
14980 * Future io's coming on this eq should be able to
14981 * pick it up. As for the case of single io's, they
14982 * will be handled through a sched from polling timer
14983 * function which is currently triggered every 1msec.
14984 */
14985 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14986
14987 return i;
14988}
14989
/* Add @eq to the HBA's RCU-protected poll list.
 * If the list was empty, the heartbeat poll timer is started first so the
 * newly added EQ is guaranteed a servicing tick. synchronize_rcu() then
 * waits out in-flight readers before the caller proceeds.
 */
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* kickstart slowpath processing if needed */
	if (list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	list_add_rcu(&eq->_poll_list, &phba->poll_list);
	synchronize_rcu();
}
15002
/* Remove @eq from the HBA's RCU-protected poll list and, when the list
 * becomes empty, stop the heartbeat poll timer. synchronize_rcu() ensures
 * no timer-driven walker still references @eq before the caller continues.
 */
static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* Disable slowpath processing for this eq. Kick start the eq
	 * by RE-ARMING the eq's ASAP
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		del_timer_sync(&phba->cpuhp_poll_timer);
}
15016
James Smartd480e572019-11-11 15:03:58 -080015017void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
James Smart93a4d6f2019-11-04 16:57:05 -080015018{
15019 struct lpfc_queue *eq, *next;
15020
15021 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15022 list_del(&eq->_poll_list);
15023
15024 INIT_LIST_HEAD(&phba->poll_list);
15025 synchronize_rcu();
15026}
15027
/* Switch @eq between interrupt-driven and polled servicing.
 * No-op if the EQ is already in the requested @mode. Publishes the new
 * mode before touching the poll list so the fastpath observes it first.
 */
static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
	if (mode == eq->mode)
		return;
	/*
	 * Currently this function is only called during a hotplug event and
	 * the cpu on which this function is executing is going offline. By
	 * now the hotplug has instructed the scheduler to remove this cpu
	 * from the cpu active mask, so we don't need to worry about being
	 * put aside by the scheduler for a high priority process. Yes, the
	 * interrupts could come but they are known to retire ASAP.
	 */

	/* Disable polling in the fastpath */
	WRITE_ONCE(eq->mode, mode);
	/* flush out the store buffer */
	smp_wmb();

	/*
	 * Add this eq to the polling list and start polling. For a grace
	 * period both interrupt handler and poller will try to process the
	 * eq _but_ that's fine. We have a synchronization mechanism in
	 * place (queue_claimed) to deal with it. This is just a draining
	 * phase for interrupt handlers (not eq's) as we have guaranteed
	 * through the barrier that all the CPUs have seen the new CQ_POLLED
	 * state, which will effectively disable the REARMING of the EQ. The
	 * whole idea is eq's die off eventually as we are not rearming EQ's
	 * anymore.
	 */
	mode ? lpfc_sli4_add_to_poll_list(eq) :
	       lpfc_sli4_remove_from_poll_list(eq);
}
15063
/* Put @eq into polled mode (serviced by the heartbeat poll timer). */
void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}
15068
/* Return @eq to interrupt-driven mode and kick its doorbell once so any
 * pending work generates a fresh completion/interrupt.
 */
void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

	/* Kick start for the pending io's in h/w.
	 * Once we switch back to interrupt processing on a eq
	 * the io path completion will only arm eq's when it
	 * receives a completion. But since eq's are in a
	 * disarmed state it doesn't receive a completion. This
	 * creates a deadlock scenario.
	 */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}
15084
James Smart4f774512009-05-22 14:52:35 -040015085/**
15086 * lpfc_sli4_queue_free - free a queue structure and associated memory
15087 * @queue: The queue structure to free.
15088 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -040015089 * This function frees a queue structure and the DMAable memory used for
James Smart4f774512009-05-22 14:52:35 -040015090 * the host resident queue. This function must be called after destroying the
15091 * queue on the HBA.
15092 **/
15093void
15094lpfc_sli4_queue_free(struct lpfc_queue *queue)
15095{
15096 struct lpfc_dmabuf *dmabuf;
15097
15098 if (!queue)
15099 return;
15100
James Smart4645f7b2019-03-12 16:30:14 -070015101 if (!list_empty(&queue->wq_list))
15102 list_del(&queue->wq_list);
15103
James Smart4f774512009-05-22 14:52:35 -040015104 while (!list_empty(&queue->page_list)) {
15105 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15106 list);
James Smart81b96ed2017-11-20 16:00:29 -080015107 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
James Smart4f774512009-05-22 14:52:35 -040015108 dmabuf->virt, dmabuf->phys);
15109 kfree(dmabuf);
15110 }
James Smart895427b2017-02-12 13:52:30 -080015111 if (queue->rqbp) {
15112 lpfc_free_rq_buffer(queue->phba, queue);
15113 kfree(queue->rqbp);
15114 }
James Smartd1f525a2017-04-21 16:04:55 -070015115
James Smart32517fc2019-01-28 11:14:33 -080015116 if (!list_empty(&queue->cpu_list))
15117 list_del(&queue->cpu_list);
15118
James Smart4f774512009-05-22 14:52:35 -040015119 kfree(queue);
15120 return;
15121}
15122
15123/**
15124 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15125 * @phba: The HBA that this queue is being created on.
James Smart81b96ed2017-11-20 16:00:29 -080015126 * @page_size: The size of a queue page
James Smart4f774512009-05-22 14:52:35 -040015127 * @entry_size: The size of each queue entry for this queue.
Lee Jones7af29d42020-07-21 17:41:31 +010015128 * @entry_count: The number of entries that this queue will handle.
James Smartc1a21eb2019-03-12 16:30:29 -070015129 * @cpu: The cpu that will primarily utilize this queue.
James Smart4f774512009-05-22 14:52:35 -040015130 *
15131 * This function allocates a queue structure and the DMAable memory used for
15132 * the host resident queue. This function must be called before creating the
15133 * queue on the HBA.
15134 **/
15135struct lpfc_queue *
James Smart81b96ed2017-11-20 16:00:29 -080015136lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
James Smartc1a21eb2019-03-12 16:30:29 -070015137 uint32_t entry_size, uint32_t entry_count, int cpu)
James Smart4f774512009-05-22 14:52:35 -040015138{
15139 struct lpfc_queue *queue;
15140 struct lpfc_dmabuf *dmabuf;
James Smartcb5172e2010-03-15 11:25:07 -040015141 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart9afbee32019-03-12 16:30:28 -070015142 uint16_t x, pgcnt;
James Smart4f774512009-05-22 14:52:35 -040015143
James Smartcb5172e2010-03-15 11:25:07 -040015144 if (!phba->sli4_hba.pc_sli4_params.supported)
James Smart81b96ed2017-11-20 16:00:29 -080015145 hw_page_size = page_size;
James Smartcb5172e2010-03-15 11:25:07 -040015146
James Smart9afbee32019-03-12 16:30:28 -070015147 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
James Smart895427b2017-02-12 13:52:30 -080015148
15149 /* If needed, Adjust page count to match the max the adapter supports */
James Smart9afbee32019-03-12 16:30:28 -070015150 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15151 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15152
James Smartc1a21eb2019-03-12 16:30:29 -070015153 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15154 GFP_KERNEL, cpu_to_node(cpu));
James Smart9afbee32019-03-12 16:30:28 -070015155 if (!queue)
15156 return NULL;
James Smart895427b2017-02-12 13:52:30 -080015157
James Smart4f774512009-05-22 14:52:35 -040015158 INIT_LIST_HEAD(&queue->list);
James Smart93a4d6f2019-11-04 16:57:05 -080015159 INIT_LIST_HEAD(&queue->_poll_list);
James Smart895427b2017-02-12 13:52:30 -080015160 INIT_LIST_HEAD(&queue->wq_list);
James Smart6e8e1c12018-01-30 15:58:49 -080015161 INIT_LIST_HEAD(&queue->wqfull_list);
James Smart4f774512009-05-22 14:52:35 -040015162 INIT_LIST_HEAD(&queue->page_list);
15163 INIT_LIST_HEAD(&queue->child_list);
James Smart32517fc2019-01-28 11:14:33 -080015164 INIT_LIST_HEAD(&queue->cpu_list);
James Smart81b96ed2017-11-20 16:00:29 -080015165
15166 /* Set queue parameters now. If the system cannot provide memory
15167 * resources, the free routine needs to know what was allocated.
15168 */
James Smart9afbee32019-03-12 16:30:28 -070015169 queue->page_count = pgcnt;
15170 queue->q_pgs = (void **)&queue[1];
15171 queue->entry_cnt_per_pg = hw_page_size / entry_size;
James Smart81b96ed2017-11-20 16:00:29 -080015172 queue->entry_size = entry_size;
15173 queue->entry_count = entry_count;
15174 queue->page_size = hw_page_size;
15175 queue->phba = phba;
15176
James Smart9afbee32019-03-12 16:30:28 -070015177 for (x = 0; x < queue->page_count; x++) {
James Smartc1a21eb2019-03-12 16:30:29 -070015178 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15179 dev_to_node(&phba->pcidev->dev));
James Smart4f774512009-05-22 14:52:35 -040015180 if (!dmabuf)
15181 goto out_fail;
Luis Chamberlain750afb02019-01-04 09:23:09 +010015182 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15183 hw_page_size, &dmabuf->phys,
15184 GFP_KERNEL);
James Smart4f774512009-05-22 14:52:35 -040015185 if (!dmabuf->virt) {
15186 kfree(dmabuf);
15187 goto out_fail;
15188 }
15189 dmabuf->buffer_tag = x;
15190 list_add_tail(&dmabuf->list, &queue->page_list);
James Smart9afbee32019-03-12 16:30:28 -070015191 /* use lpfc_sli4_qe to index a paritcular entry in this page */
15192 queue->q_pgs[x] = dmabuf->virt;
James Smart4f774512009-05-22 14:52:35 -040015193 }
Dick Kennedyf485c182017-09-29 17:34:34 -070015194 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15195 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
James Smart32517fc2019-01-28 11:14:33 -080015196 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15197 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
James Smart4f774512009-05-22 14:52:35 -040015198
James Smart32517fc2019-01-28 11:14:33 -080015199 /* notify_interval will be set during q creation */
James Smart64eb4dc2017-05-15 15:20:49 -070015200
James Smart4f774512009-05-22 14:52:35 -040015201 return queue;
15202out_fail:
15203 lpfc_sli4_queue_free(queue);
15204 return NULL;
15205}
15206
15207/**
James Smart962bc512013-01-03 15:44:00 -050015208 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15209 * @phba: HBA structure that indicates port to create a queue on.
15210 * @pci_barset: PCI BAR set flag.
15211 *
15212 * This function shall perform iomap of the specified PCI BAR address to host
15213 * memory address if not already done so and return it. The returned host
15214 * memory address can be NULL.
15215 */
15216static void __iomem *
15217lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15218{
James Smart962bc512013-01-03 15:44:00 -050015219 if (!phba->pcidev)
15220 return NULL;
James Smart962bc512013-01-03 15:44:00 -050015221
15222 switch (pci_barset) {
15223 case WQ_PCI_BAR_0_AND_1:
James Smart962bc512013-01-03 15:44:00 -050015224 return phba->pci_bar0_memmap_p;
15225 case WQ_PCI_BAR_2_AND_3:
James Smart962bc512013-01-03 15:44:00 -050015226 return phba->pci_bar2_memmap_p;
15227 case WQ_PCI_BAR_4_AND_5:
James Smart962bc512013-01-03 15:44:00 -050015228 return phba->pci_bar4_memmap_p;
15229 default:
15230 break;
15231 }
15232 return NULL;
15233}
15234
15235/**
James Smartcb733e32019-01-28 11:14:32 -080015236 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15237 * @phba: HBA structure that EQs are on.
15238 * @startq: The starting EQ index to modify
15239 * @numq: The number of EQs (consecutive indexes) to modify
15240 * @usdelay: amount of delay
James Smart173edbb2012-06-12 13:54:50 -040015241 *
James Smartcb733e32019-01-28 11:14:32 -080015242 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15243 * is set either by writing to a register (if supported by the SLI Port)
15244 * or by mailbox command. The mailbox command allows several EQs to be
15245 * updated at once.
James Smart173edbb2012-06-12 13:54:50 -040015246 *
James Smartcb733e32019-01-28 11:14:32 -080015247 * The @phba struct is used to send a mailbox command to HBA. The @startq
15248 * is used to get the starting EQ index to change. The @numq value is
15249 * used to specify how many consecutive EQ indexes, starting at EQ index,
15250 * are to be changed. This function is asynchronous and will wait for any
15251 * mailbox commands to finish before returning.
James Smart173edbb2012-06-12 13:54:50 -040015252 *
James Smartcb733e32019-01-28 11:14:32 -080015253 * On success this function will return a zero. If unable to allocate
15254 * enough memory this function will return -ENOMEM. If a mailbox command
15255 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
15256 * have had their delay multipler changed.
James Smart173edbb2012-06-12 13:54:50 -040015257 **/
James Smartcb733e32019-01-28 11:14:32 -080015258void
James Smart0cf07f842017-06-01 21:07:10 -070015259lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
James Smartcb733e32019-01-28 11:14:32 -080015260 uint32_t numq, uint32_t usdelay)
James Smart173edbb2012-06-12 13:54:50 -040015261{
15262 struct lpfc_mbx_modify_eq_delay *eq_delay;
15263 LPFC_MBOXQ_t *mbox;
15264 struct lpfc_queue *eq;
James Smartcb733e32019-01-28 11:14:32 -080015265 int cnt = 0, rc, length;
James Smart173edbb2012-06-12 13:54:50 -040015266 uint32_t shdr_status, shdr_add_status;
James Smartcb733e32019-01-28 11:14:32 -080015267 uint32_t dmult;
James Smart895427b2017-02-12 13:52:30 -080015268 int qidx;
James Smart173edbb2012-06-12 13:54:50 -040015269 union lpfc_sli4_cfg_shdr *shdr;
James Smart173edbb2012-06-12 13:54:50 -040015270
James Smart6a828b02019-01-28 11:14:31 -080015271 if (startq >= phba->cfg_irq_chann)
James Smartcb733e32019-01-28 11:14:32 -080015272 return;
15273
15274 if (usdelay > 0xFFFF) {
15275 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15276 "6429 usdelay %d too large. Scaled down to "
15277 "0xFFFF.\n", usdelay);
15278 usdelay = 0xFFFF;
15279 }
15280
15281 /* set values by EQ_DELAY register if supported */
15282 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15283 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
James Smart657add42019-05-21 17:49:06 -070015284 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
James Smartcb733e32019-01-28 11:14:32 -080015285 if (!eq)
15286 continue;
15287
James Smart32517fc2019-01-28 11:14:33 -080015288 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
James Smartcb733e32019-01-28 11:14:32 -080015289
15290 if (++cnt >= numq)
15291 break;
15292 }
James Smartcb733e32019-01-28 11:14:32 -080015293 return;
15294 }
15295
15296 /* Otherwise, set values by mailbox cmd */
James Smart173edbb2012-06-12 13:54:50 -040015297
15298 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
James Smartcb733e32019-01-28 11:14:32 -080015299 if (!mbox) {
Dick Kennedy372c1872020-06-30 14:50:00 -070015300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartcb733e32019-01-28 11:14:32 -080015301 "6428 Failed allocating mailbox cmd buffer."
15302 " EQ delay was not set.\n");
15303 return;
15304 }
James Smart173edbb2012-06-12 13:54:50 -040015305 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15306 sizeof(struct lpfc_sli4_cfg_mhdr));
15307 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15308 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15309 length, LPFC_SLI4_MBX_EMBED);
15310 eq_delay = &mbox->u.mqe.un.eq_delay;
15311
15312 /* Calculate delay multiper from maximum interrupt per second */
James Smartcb733e32019-01-28 11:14:32 -080015313 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15314 if (dmult)
15315 dmult--;
James Smart0cf07f842017-06-01 21:07:10 -070015316 if (dmult > LPFC_DMULT_MAX)
15317 dmult = LPFC_DMULT_MAX;
James Smart173edbb2012-06-12 13:54:50 -040015318
James Smart6a828b02019-01-28 11:14:31 -080015319 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
James Smart657add42019-05-21 17:49:06 -070015320 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
James Smart173edbb2012-06-12 13:54:50 -040015321 if (!eq)
15322 continue;
James Smartcb733e32019-01-28 11:14:32 -080015323 eq->q_mode = usdelay;
James Smart173edbb2012-06-12 13:54:50 -040015324 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15325 eq_delay->u.request.eq[cnt].phase = 0;
15326 eq_delay->u.request.eq[cnt].delay_multi = dmult;
James Smart0cf07f842017-06-01 21:07:10 -070015327
James Smartcb733e32019-01-28 11:14:32 -080015328 if (++cnt >= numq)
James Smart173edbb2012-06-12 13:54:50 -040015329 break;
15330 }
15331 eq_delay->u.request.num_eq = cnt;
15332
15333 mbox->vport = phba->pport;
15334 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart3e1f0712018-11-29 16:09:29 -080015335 mbox->ctx_buf = NULL;
15336 mbox->ctx_ndlp = NULL;
James Smart173edbb2012-06-12 13:54:50 -040015337 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15338 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15339 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15340 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15341 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070015342 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart173edbb2012-06-12 13:54:50 -040015343 "2512 MODIFY_EQ_DELAY mailbox failed with "
15344 "status x%x add_status x%x, mbx status x%x\n",
15345 shdr_status, shdr_add_status, rc);
James Smart173edbb2012-06-12 13:54:50 -040015346 }
15347 mempool_free(mbox, phba->mbox_mem_pool);
James Smartcb733e32019-01-28 11:14:32 -080015348 return;
James Smart173edbb2012-06-12 13:54:50 -040015349}
15350
15351/**
James Smart4f774512009-05-22 14:52:35 -040015352 * lpfc_eq_create - Create an Event Queue on the HBA
15353 * @phba: HBA structure that indicates port to create a queue on.
15354 * @eq: The queue structure to use to create the event queue.
15355 * @imax: The maximum interrupt per second limit.
15356 *
15357 * This function creates an event queue, as detailed in @eq, on a port,
15358 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15359 *
15360 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15361 * is used to get the entry count and entry size that are necessary to
15362 * determine the number of pages to allocate and use for this queue. This
15363 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15364 * event queue. This function is asynchronous and will wait for the mailbox
15365 * command to finish before continuing.
15366 *
15367 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040015368 * memory this function will return -ENOMEM. If the queue create mailbox command
15369 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040015370 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015371int
James Smartee020062012-09-29 11:28:52 -040015372lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
James Smart4f774512009-05-22 14:52:35 -040015373{
15374 struct lpfc_mbx_eq_create *eq_create;
15375 LPFC_MBOXQ_t *mbox;
15376 int rc, length, status = 0;
15377 struct lpfc_dmabuf *dmabuf;
15378 uint32_t shdr_status, shdr_add_status;
15379 union lpfc_sli4_cfg_shdr *shdr;
15380 uint16_t dmult;
James Smart49198b32010-04-06 15:04:33 -040015381 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15382
James Smart2e90f4b2011-12-13 13:22:37 -050015383 /* sanity check on queue memory */
15384 if (!eq)
15385 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040015386 if (!phba->sli4_hba.pc_sli4_params.supported)
15387 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040015388
15389 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15390 if (!mbox)
15391 return -ENOMEM;
15392 length = (sizeof(struct lpfc_mbx_eq_create) -
15393 sizeof(struct lpfc_sli4_cfg_mhdr));
15394 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15395 LPFC_MBOX_OPCODE_EQ_CREATE,
15396 length, LPFC_SLI4_MBX_EMBED);
15397 eq_create = &mbox->u.mqe.un.eq_create;
James Smart7365f6f2018-02-22 08:18:46 -080015398 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040015399 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15400 eq->page_count);
15401 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15402 LPFC_EQE_SIZE);
15403 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
James Smart7365f6f2018-02-22 08:18:46 -080015404
15405 /* Use version 2 of CREATE_EQ if eqav is set */
15406 if (phba->sli4_hba.pc_sli4_params.eqav) {
15407 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15408 LPFC_Q_CREATE_VERSION_2);
15409 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15410 phba->sli4_hba.pc_sli4_params.eqav);
15411 }
15412
James Smart2c9c5a02015-04-07 15:07:15 -040015413 /* don't setup delay multiplier using EQ_CREATE */
15414 dmult = 0;
James Smart4f774512009-05-22 14:52:35 -040015415 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15416 dmult);
15417 switch (eq->entry_count) {
15418 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070015419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040015420 "0360 Unsupported EQ count. (%d)\n",
15421 eq->entry_count);
James Smart04d210c2019-05-21 17:49:03 -070015422 if (eq->entry_count < 256) {
15423 status = -EINVAL;
15424 goto out;
15425 }
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -050015426 fallthrough; /* otherwise default to smallest count */
James Smart4f774512009-05-22 14:52:35 -040015427 case 256:
15428 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15429 LPFC_EQ_CNT_256);
15430 break;
15431 case 512:
15432 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15433 LPFC_EQ_CNT_512);
15434 break;
15435 case 1024:
15436 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15437 LPFC_EQ_CNT_1024);
15438 break;
15439 case 2048:
15440 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15441 LPFC_EQ_CNT_2048);
15442 break;
15443 case 4096:
15444 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15445 LPFC_EQ_CNT_4096);
15446 break;
15447 }
15448 list_for_each_entry(dmabuf, &eq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040015449 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040015450 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15451 putPaddrLow(dmabuf->phys);
15452 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15453 putPaddrHigh(dmabuf->phys);
15454 }
15455 mbox->vport = phba->pport;
15456 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart3e1f0712018-11-29 16:09:29 -080015457 mbox->ctx_buf = NULL;
15458 mbox->ctx_ndlp = NULL;
James Smart4f774512009-05-22 14:52:35 -040015459 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smart4f774512009-05-22 14:52:35 -040015460 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15461 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15462 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070015463 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040015464 "2500 EQ_CREATE mailbox failed with "
15465 "status x%x add_status x%x, mbx status x%x\n",
15466 shdr_status, shdr_add_status, rc);
15467 status = -ENXIO;
15468 }
15469 eq->type = LPFC_EQ;
15470 eq->subtype = LPFC_NONE;
15471 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15472 if (eq->queue_id == 0xFFFF)
15473 status = -ENXIO;
15474 eq->host_index = 0;
James Smart32517fc2019-01-28 11:14:33 -080015475 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15476 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
James Smart04d210c2019-05-21 17:49:03 -070015477out:
James Smart8fa38512009-07-19 10:01:03 -040015478 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015479 return status;
15480}
15481
Dick Kennedy317aeb82020-06-30 14:49:59 -070015482static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15483{
15484 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15485
Colin Ian King26e0b9a2020-07-07 16:00:18 +010015486 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
Dick Kennedy317aeb82020-06-30 14:49:59 -070015487
15488 return 1;
15489}
15490
James Smart4f774512009-05-22 14:52:35 -040015491/**
15492 * lpfc_cq_create - Create a Completion Queue on the HBA
15493 * @phba: HBA structure that indicates port to create a queue on.
15494 * @cq: The queue structure to use to create the completion queue.
15495 * @eq: The event queue to bind this completion queue to.
Lee Jones7af29d42020-07-21 17:41:31 +010015496 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15497 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
James Smart4f774512009-05-22 14:52:35 -040015498 *
15499 * This function creates a completion queue, as detailed in @wq, on a port,
15500 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15501 *
15502 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15503 * is used to get the entry count and entry size that are necessary to
15504 * determine the number of pages to allocate and use for this queue. The @eq
15505 * is used to indicate which event queue to bind this completion queue to. This
15506 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15507 * completion queue. This function is asynchronous and will wait for the mailbox
15508 * command to finish before continuing.
15509 *
15510 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040015511 * memory this function will return -ENOMEM. If the queue create mailbox command
15512 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040015513 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040015514int
James Smart4f774512009-05-22 14:52:35 -040015515lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15516 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15517{
15518 struct lpfc_mbx_cq_create *cq_create;
15519 struct lpfc_dmabuf *dmabuf;
15520 LPFC_MBOXQ_t *mbox;
15521 int rc, length, status = 0;
15522 uint32_t shdr_status, shdr_add_status;
15523 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040015524
James Smart2e90f4b2011-12-13 13:22:37 -050015525 /* sanity check on queue memory */
15526 if (!cq || !eq)
15527 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040015528
James Smart4f774512009-05-22 14:52:35 -040015529 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15530 if (!mbox)
15531 return -ENOMEM;
15532 length = (sizeof(struct lpfc_mbx_cq_create) -
15533 sizeof(struct lpfc_sli4_cfg_mhdr));
15534 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15535 LPFC_MBOX_OPCODE_CQ_CREATE,
15536 length, LPFC_SLI4_MBX_EMBED);
15537 cq_create = &mbox->u.mqe.un.cq_create;
James Smart5a6f1332011-03-11 16:05:35 -050015538 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040015539 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15540 cq->page_count);
15541 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15542 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
James Smart5a6f1332011-03-11 16:05:35 -050015543 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15544 phba->sli4_hba.pc_sli4_params.cqv);
15545 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
James Smart81b96ed2017-11-20 16:00:29 -080015546 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15547 (cq->page_size / SLI4_PAGE_SIZE));
James Smart5a6f1332011-03-11 16:05:35 -050015548 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15549 eq->queue_id);
James Smart7365f6f2018-02-22 08:18:46 -080015550 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15551 phba->sli4_hba.pc_sli4_params.cqav);
James Smart5a6f1332011-03-11 16:05:35 -050015552 } else {
15553 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15554 eq->queue_id);
15555 }
James Smart4f774512009-05-22 14:52:35 -040015556 switch (cq->entry_count) {
James Smart81b96ed2017-11-20 16:00:29 -080015557 case 2048:
15558 case 4096:
15559 if (phba->sli4_hba.pc_sli4_params.cqv ==
15560 LPFC_Q_CREATE_VERSION_2) {
15561 cq_create->u.request.context.lpfc_cq_context_count =
15562 cq->entry_count;
15563 bf_set(lpfc_cq_context_count,
15564 &cq_create->u.request.context,
15565 LPFC_CQ_CNT_WORD7);
15566 break;
15567 }
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -050015568 fallthrough;
James Smart4f774512009-05-22 14:52:35 -040015569 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070015570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2ea259e2017-02-12 13:52:27 -080015571 "0361 Unsupported CQ count: "
James Smart64eb4dc2017-05-15 15:20:49 -070015572 "entry cnt %d sz %d pg cnt %d\n",
James Smart2ea259e2017-02-12 13:52:27 -080015573 cq->entry_count, cq->entry_size,
James Smart64eb4dc2017-05-15 15:20:49 -070015574 cq->page_count);
James Smart4f4c1862012-06-12 13:54:02 -040015575 if (cq->entry_count < 256) {
15576 status = -EINVAL;
15577 goto out;
15578 }
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -050015579 fallthrough; /* otherwise default to smallest count */
James Smart4f774512009-05-22 14:52:35 -040015580 case 256:
15581 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15582 LPFC_CQ_CNT_256);
15583 break;
15584 case 512:
15585 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15586 LPFC_CQ_CNT_512);
15587 break;
15588 case 1024:
15589 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15590 LPFC_CQ_CNT_1024);
15591 break;
15592 }
15593 list_for_each_entry(dmabuf, &cq->page_list, list) {
James Smart81b96ed2017-11-20 16:00:29 -080015594 memset(dmabuf->virt, 0, cq->page_size);
James Smart4f774512009-05-22 14:52:35 -040015595 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15596 putPaddrLow(dmabuf->phys);
15597 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15598 putPaddrHigh(dmabuf->phys);
15599 }
15600 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15601
15602 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040015603 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15604 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15605 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070015606 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040015607 "2501 CQ_CREATE mailbox failed with "
15608 "status x%x add_status x%x, mbx status x%x\n",
15609 shdr_status, shdr_add_status, rc);
15610 status = -ENXIO;
15611 goto out;
15612 }
15613 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15614 if (cq->queue_id == 0xFFFF) {
15615 status = -ENXIO;
15616 goto out;
15617 }
15618 /* link the cq onto the parent eq child list */
15619 list_add_tail(&cq->list, &eq->child_list);
15620 /* Set up completion queue's type and subtype */
15621 cq->type = type;
15622 cq->subtype = subtype;
15623 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
James Smart2a622bf2011-02-16 12:40:06 -050015624 cq->assoc_qid = eq->queue_id;
James Smart6a828b02019-01-28 11:14:31 -080015625 cq->assoc_qp = eq;
James Smart4f774512009-05-22 14:52:35 -040015626 cq->host_index = 0;
James Smart32517fc2019-01-28 11:14:33 -080015627 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15628 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
James Smart4f774512009-05-22 14:52:35 -040015629
James Smart6a828b02019-01-28 11:14:31 -080015630 if (cq->queue_id > phba->sli4_hba.cq_max)
15631 phba->sli4_hba.cq_max = cq->queue_id;
Dick Kennedy317aeb82020-06-30 14:49:59 -070015632
15633 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
James Smart8fa38512009-07-19 10:01:03 -040015634out:
15635 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040015636 return status;
15637}
15638
15639/**
James Smart2d7dbc42017-02-12 13:52:35 -080015640 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15641 * @phba: HBA structure that indicates port to create a queue on.
15642 * @cqp: The queue structure array to use to create the completion queues.
James Smartcdb42be2019-01-28 11:14:21 -080015643 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
Lee Jones7af29d42020-07-21 17:41:31 +010015644 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15645 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
James Smart2d7dbc42017-02-12 13:52:35 -080015646 *
 * This function creates a set of completion queues to support MRQ
 * as detailed in @cqp, on a port,
15649 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15650 *
15651 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15652 * is used to get the entry count and entry size that are necessary to
15653 * determine the number of pages to allocate and use for this queue. The @eq
15654 * is used to indicate which event queue to bind this completion queue to. This
15655 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
 * completion queue. This function is synchronous (the command is issued in
 * polled mode) and will wait for the mailbox command to finish before
 * continuing.
15658 *
15659 * On success this function will return a zero. If unable to allocate enough
15660 * memory this function will return -ENOMEM. If the queue create mailbox command
15661 * fails this function will return -ENXIO.
15662 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
		   uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;	/* one CQ per configured NVMET MRQ */
	if (!cqp || !hdwq || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * The request carries a variable-length page array (page_count
	 * entries for each of the numcq queues), so the command is sent
	 * non-embedded, in externally allocated SGE memory.
	 */
	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	/* Non-embedded payload lives in the first external SGE buffer. */
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = hdwq[idx].hba_eq;	/* CQ idx is bound to hdwq idx's EQ */
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			/*
			 * Set-wide attributes (page size, page count, CQE
			 * size, number of CQs, event/valid/autovalid bits)
			 * are written only once, on the first iteration.
			 */
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					/*
					 * NOTE(review): both bf_set calls
					 * below target the same
					 * lpfc_mbx_cq_create_set_cqe_cnt
					 * field, so the second write
					 * (LPFC_CQ_CNT_WORD7) clobbers the
					 * first (cq->entry_count). Compare
					 * lpfc_cq_create(), which stores
					 * the raw count in a separate
					 * context word -- confirm intended
					 * fields against lpfc_hw4.h.
					 */
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				fallthrough;
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				fallthrough;	/* otherwise default to smallest */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		/*
		 * Every CQ after the first only records the ID of the EQ
		 * it binds to; the command provides one eq_idN field per
		 * CQ, up to 16.
		 */
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->assoc_qp = eq;
		cq->host_index = 0;
		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
					 cq->entry_count);
		cq->chann = idx;

		/* Append this CQ's DMA pages to the command's shared page
		 * array; page_idx offsets each queue's pages past the
		 * previous queues' entries.
		 */
		rc = 0;	/* counts pages added for this CQ */
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	/* Firmware hands back a base ID; the set's queue IDs are
	 * consecutive from it.
	 */
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {	/* 0xFFFF is not a valid queue ID */
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
15891
15892/**
James Smartb19a0612010-04-06 14:48:51 -040015893 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
James Smart04c68492009-05-22 14:52:52 -040015894 * @phba: HBA structure that indicates port to create a queue on.
15895 * @mq: The queue structure to use to create the mailbox queue.
James Smartb19a0612010-04-06 14:48:51 -040015896 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
James Smart04c68492009-05-22 14:52:52 -040015898 *
James Smartb19a0612010-04-06 14:48:51 -040015899 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
15901 * to mq_create_ext otherwise.
James Smart04c68492009-05-22 14:52:52 -040015902 *
James Smartb19a0612010-04-06 14:48:51 -040015903 * This routine cannot fail as all attributes were previously accessed and
15904 * initialized in mq_create_ext.
James Smart04c68492009-05-22 14:52:52 -040015905 **/
James Smartb19a0612010-04-06 14:48:51 -040015906static void
15907lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15908 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
James Smart04c68492009-05-22 14:52:52 -040015909{
15910 struct lpfc_mbx_mq_create *mq_create;
15911 struct lpfc_dmabuf *dmabuf;
James Smartb19a0612010-04-06 14:48:51 -040015912 int length;
James Smart04c68492009-05-22 14:52:52 -040015913
James Smart04c68492009-05-22 14:52:52 -040015914 length = (sizeof(struct lpfc_mbx_mq_create) -
15915 sizeof(struct lpfc_sli4_cfg_mhdr));
15916 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15917 LPFC_MBOX_OPCODE_MQ_CREATE,
15918 length, LPFC_SLI4_MBX_EMBED);
15919 mq_create = &mbox->u.mqe.un.mq_create;
15920 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
James Smartb19a0612010-04-06 14:48:51 -040015921 mq->page_count);
James Smart04c68492009-05-22 14:52:52 -040015922 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
James Smartb19a0612010-04-06 14:48:51 -040015923 cq->queue_id);
James Smart04c68492009-05-22 14:52:52 -040015924 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15925 switch (mq->entry_count) {
James Smart04c68492009-05-22 14:52:52 -040015926 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050015927 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15928 LPFC_MQ_RING_SIZE_16);
James Smart04c68492009-05-22 14:52:52 -040015929 break;
15930 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050015931 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15932 LPFC_MQ_RING_SIZE_32);
James Smart04c68492009-05-22 14:52:52 -040015933 break;
15934 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050015935 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15936 LPFC_MQ_RING_SIZE_64);
James Smart04c68492009-05-22 14:52:52 -040015937 break;
15938 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050015939 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15940 LPFC_MQ_RING_SIZE_128);
James Smart04c68492009-05-22 14:52:52 -040015941 break;
15942 }
15943 list_for_each_entry(dmabuf, &mq->page_list, list) {
15944 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
James Smartb19a0612010-04-06 14:48:51 -040015945 putPaddrLow(dmabuf->phys);
James Smart04c68492009-05-22 14:52:52 -040015946 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smartb19a0612010-04-06 14:48:51 -040015947 putPaddrHigh(dmabuf->phys);
15948 }
15949}
15950
15951/**
15952 * lpfc_mq_create - Create a mailbox Queue on the HBA
15953 * @phba: HBA structure that indicates port to create a queue on.
15954 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
15956 * @subtype: The queue's subtype.
15957 *
15958 * This function creates a mailbox queue, as detailed in @mq, on a port,
15959 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15960 *
15961 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15962 * is used to get the entry count and entry size that are necessary to
15963 * determine the number of pages to allocate and use for this queue. This
15964 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. This function is synchronous and will wait for the mailbox
15966 * command to finish before continuing.
15967 *
15968 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040015969 * memory this function will return -ENOMEM. If the queue create mailbox command
15970 * fails this function will return -ENXIO.
James Smartb19a0612010-04-06 14:48:51 -040015971 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/*
	 * Build MQ_CREATE_EXT first; if the firmware rejects it we fall
	 * back to the plain MQ_CREATE form below.
	 */
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	/* Enable async event delivery: link, FIP, group-5, FC and SLI. */
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	/* In command version 1 the CQ ID lives outside the context. */
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		fallthrough;	/* otherwise default to smallest count */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	/* Zero the queue pages and hand their DMA addresses to firmware. */
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		/*
		 * Older firmware: rebuild the mailbox as a plain MQ_CREATE
		 * (no async event registration) and reissue it, then
		 * re-read the queue ID from the non-extended response.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		/* Re-point shdr at the non-extended command's header so the
		 * status checks below read the right response.
		 */
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {	/* 0xFFFF is not a valid queue ID */
		status = -ENXIO;
		goto out;
	}
	/* Record host-side bookkeeping for the newly created queue. */
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
16104
16105/**
James Smart4f774512009-05-22 14:52:35 -040016106 * lpfc_wq_create - Create a Work Queue on the HBA
16107 * @phba: HBA structure that indicates port to create a queue on.
16108 * @wq: The queue structure to use to create the work queue.
16109 * @cq: The completion queue to bind this work queue to.
16110 * @subtype: The subtype of the work queue indicating its functionality.
16111 *
16112 * This function creates a work queue, as detailed in @wq, on a port, described
16113 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16114 *
16115 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16116 * is used to get the entry count and entry size that are necessary to
16117 * determine the number of pages to allocate and use for this queue. The @cq
16118 * is used to indicate which completion queue to bind this work queue to. This
16119 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. This function is synchronous and will wait for the mailbox
16121 * command to finish before continuing.
16122 *
16123 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040016124 * memory this function will return -ENOMEM. If the queue create mailbox command
16125 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040016126 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040016127int
James Smart4f774512009-05-22 14:52:35 -040016128lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16129 struct lpfc_queue *cq, uint32_t subtype)
16130{
16131 struct lpfc_mbx_wq_create *wq_create;
16132 struct lpfc_dmabuf *dmabuf;
16133 LPFC_MBOXQ_t *mbox;
16134 int rc, length, status = 0;
16135 uint32_t shdr_status, shdr_add_status;
16136 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040016137 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart5a6f1332011-03-11 16:05:35 -050016138 struct dma_address *page;
James Smart962bc512013-01-03 15:44:00 -050016139 void __iomem *bar_memmap_p;
16140 uint32_t db_offset;
16141 uint16_t pci_barset;
James Smart1351e692018-02-22 08:18:43 -080016142 uint8_t dpp_barset;
16143 uint32_t dpp_offset;
James Smart81b96ed2017-11-20 16:00:29 -080016144 uint8_t wq_create_version;
Lee Jones11d8e562020-07-21 17:41:44 +010016145#ifdef CONFIG_X86
16146 unsigned long pg_addr;
16147#endif
James Smart49198b32010-04-06 15:04:33 -040016148
James Smart2e90f4b2011-12-13 13:22:37 -050016149 /* sanity check on queue memory */
16150 if (!wq || !cq)
16151 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040016152 if (!phba->sli4_hba.pc_sli4_params.supported)
James Smart81b96ed2017-11-20 16:00:29 -080016153 hw_page_size = wq->page_size;
James Smart4f774512009-05-22 14:52:35 -040016154
16155 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16156 if (!mbox)
16157 return -ENOMEM;
16158 length = (sizeof(struct lpfc_mbx_wq_create) -
16159 sizeof(struct lpfc_sli4_cfg_mhdr));
16160 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16161 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16162 length, LPFC_SLI4_MBX_EMBED);
16163 wq_create = &mbox->u.mqe.un.wq_create;
James Smart5a6f1332011-03-11 16:05:35 -050016164 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040016165 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16166 wq->page_count);
16167 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16168 cq->queue_id);
James Smart0c651872013-07-15 18:33:23 -040016169
16170 /* wqv is the earliest version supported, NOT the latest */
James Smart5a6f1332011-03-11 16:05:35 -050016171 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16172 phba->sli4_hba.pc_sli4_params.wqv);
James Smart962bc512013-01-03 15:44:00 -050016173
James Smartc176ffa2018-01-30 15:58:46 -080016174 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16175 (wq->page_size > SLI4_PAGE_SIZE))
James Smart81b96ed2017-11-20 16:00:29 -080016176 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16177 else
16178 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16179
James Smart1351e692018-02-22 08:18:43 -080016180 switch (wq_create_version) {
James Smart0c651872013-07-15 18:33:23 -040016181 case LPFC_Q_CREATE_VERSION_1:
James Smart5a6f1332011-03-11 16:05:35 -050016182 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16183 wq->entry_count);
James Smart3f247de2017-04-21 16:04:56 -070016184 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16185 LPFC_Q_CREATE_VERSION_1);
16186
James Smart5a6f1332011-03-11 16:05:35 -050016187 switch (wq->entry_size) {
16188 default:
16189 case 64:
16190 bf_set(lpfc_mbx_wq_create_wqe_size,
16191 &wq_create->u.request_1,
16192 LPFC_WQ_WQE_SIZE_64);
16193 break;
16194 case 128:
16195 bf_set(lpfc_mbx_wq_create_wqe_size,
16196 &wq_create->u.request_1,
16197 LPFC_WQ_WQE_SIZE_128);
16198 break;
16199 }
James Smart1351e692018-02-22 08:18:43 -080016200 /* Request DPP by default */
16201 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
James Smart8ea73db2017-02-12 13:52:25 -080016202 bf_set(lpfc_mbx_wq_create_page_size,
16203 &wq_create->u.request_1,
James Smart81b96ed2017-11-20 16:00:29 -080016204 (wq->page_size / SLI4_PAGE_SIZE));
James Smart5a6f1332011-03-11 16:05:35 -050016205 page = wq_create->u.request_1.page;
James Smart0c651872013-07-15 18:33:23 -040016206 break;
16207 default:
James Smart1351e692018-02-22 08:18:43 -080016208 page = wq_create->u.request.page;
16209 break;
James Smart5a6f1332011-03-11 16:05:35 -050016210 }
James Smart0c651872013-07-15 18:33:23 -040016211
James Smart4f774512009-05-22 14:52:35 -040016212 list_for_each_entry(dmabuf, &wq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040016213 memset(dmabuf->virt, 0, hw_page_size);
James Smart5a6f1332011-03-11 16:05:35 -050016214 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16215 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
James Smart4f774512009-05-22 14:52:35 -040016216 }
James Smart962bc512013-01-03 15:44:00 -050016217
16218 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16219 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16220
James Smart4f774512009-05-22 14:52:35 -040016221 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16222 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040016223 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16224 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16225 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040016227 "2503 WQ_CREATE mailbox failed with "
16228 "status x%x add_status x%x, mbx status x%x\n",
16229 shdr_status, shdr_add_status, rc);
16230 status = -ENXIO;
16231 goto out;
16232 }
James Smart1351e692018-02-22 08:18:43 -080016233
16234 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16235 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16236 &wq_create->u.response);
16237 else
16238 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16239 &wq_create->u.response_1);
16240
James Smart4f774512009-05-22 14:52:35 -040016241 if (wq->queue_id == 0xFFFF) {
16242 status = -ENXIO;
16243 goto out;
16244 }
James Smart1351e692018-02-22 08:18:43 -080016245
16246 wq->db_format = LPFC_DB_LIST_FORMAT;
16247 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16248 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16249 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16250 &wq_create->u.response);
16251 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16252 (wq->db_format != LPFC_DB_RING_FORMAT)) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1351e692018-02-22 08:18:43 -080016254 "3265 WQ[%d] doorbell format "
16255 "not supported: x%x\n",
16256 wq->queue_id, wq->db_format);
16257 status = -EINVAL;
16258 goto out;
16259 }
16260 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16261 &wq_create->u.response);
16262 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16263 pci_barset);
16264 if (!bar_memmap_p) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1351e692018-02-22 08:18:43 -080016266 "3263 WQ[%d] failed to memmap "
16267 "pci barset:x%x\n",
16268 wq->queue_id, pci_barset);
16269 status = -ENOMEM;
16270 goto out;
16271 }
16272 db_offset = wq_create->u.response.doorbell_offset;
16273 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16274 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1351e692018-02-22 08:18:43 -080016276 "3252 WQ[%d] doorbell offset "
16277 "not supported: x%x\n",
16278 wq->queue_id, db_offset);
16279 status = -EINVAL;
16280 goto out;
16281 }
16282 wq->db_regaddr = bar_memmap_p + db_offset;
16283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16284 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16285 "format:x%x\n", wq->queue_id,
16286 pci_barset, db_offset, wq->db_format);
16287 } else
16288 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
James Smart962bc512013-01-03 15:44:00 -050016289 } else {
James Smart1351e692018-02-22 08:18:43 -080016290 /* Check if DPP was honored by the firmware */
16291 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16292 &wq_create->u.response_1);
16293 if (wq->dpp_enable) {
16294 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16295 &wq_create->u.response_1);
16296 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16297 pci_barset);
16298 if (!bar_memmap_p) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1351e692018-02-22 08:18:43 -080016300 "3267 WQ[%d] failed to memmap "
16301 "pci barset:x%x\n",
16302 wq->queue_id, pci_barset);
16303 status = -ENOMEM;
16304 goto out;
16305 }
16306 db_offset = wq_create->u.response_1.doorbell_offset;
16307 wq->db_regaddr = bar_memmap_p + db_offset;
16308 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16309 &wq_create->u.response_1);
16310 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16311 &wq_create->u.response_1);
16312 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16313 dpp_barset);
16314 if (!bar_memmap_p) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1351e692018-02-22 08:18:43 -080016316 "3268 WQ[%d] failed to memmap "
16317 "pci barset:x%x\n",
16318 wq->queue_id, dpp_barset);
16319 status = -ENOMEM;
16320 goto out;
16321 }
16322 dpp_offset = wq_create->u.response_1.dpp_offset;
16323 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16324 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16325 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16326 "dpp_id:x%x dpp_barset:x%x "
16327 "dpp_offset:x%x\n",
16328 wq->queue_id, pci_barset, db_offset,
16329 wq->dpp_id, dpp_barset, dpp_offset);
16330
Lee Jones3c1311a2020-07-21 17:41:28 +010016331#ifdef CONFIG_X86
James Smart1351e692018-02-22 08:18:43 -080016332 /* Enable combined writes for DPP aperture */
16333 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
James Smart1351e692018-02-22 08:18:43 -080016334 rc = set_memory_wc(pg_addr, 1);
16335 if (rc) {
16336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16337 "3272 Cannot setup Combined "
16338 "Write on WQ[%d] - disable DPP\n",
16339 wq->queue_id);
16340 phba->cfg_enable_dpp = 0;
16341 }
16342#else
16343 phba->cfg_enable_dpp = 0;
16344#endif
16345 } else
16346 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
James Smart962bc512013-01-03 15:44:00 -050016347 }
James Smart895427b2017-02-12 13:52:30 -080016348 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16349 if (wq->pring == NULL) {
16350 status = -ENOMEM;
16351 goto out;
16352 }
James Smart4f774512009-05-22 14:52:35 -040016353 wq->type = LPFC_WQ;
James Smart2a622bf2011-02-16 12:40:06 -050016354 wq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040016355 wq->subtype = subtype;
16356 wq->host_index = 0;
16357 wq->hba_index = 0;
James Smart32517fc2019-01-28 11:14:33 -080016358 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
James Smart4f774512009-05-22 14:52:35 -040016359
16360 /* link the wq onto the parent cq child list */
16361 list_add_tail(&wq->list, &cq->child_list);
16362out:
James Smart8fa38512009-07-19 10:01:03 -040016363 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040016364 return status;
16365}
16366
/**
 * lpfc_rq_create - Create a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba, by sending two RQ_CREATE mailbox
 * commands (one for the header RQ, one for the data RQ) to the HBA.
 *
 * The @phba struct is used to send mailbox commands to the HBA. The @drq and
 * @hrq structs are used to get the entry counts and page lists needed to
 * build each command. The @cq indicates which completion queue received
 * buffers posted to these queues are bound to. Both commands are issued in
 * MBX_POLL mode, so this function blocks until each mailbox completes.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO. If the header and data
 * entry counts disagree, -EINVAL is returned; a NULL queue yields -ENODEV.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	/* If the port did not report its page size, fall back to the default */
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	/* Header and data RQs operate in lock-step; counts must match */
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common config header */
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		/* V1 command: entry count, buffer size and RQE size are
		 * explicit fields rather than encoded ring-size values.
		 */
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		/* V0 command: only fixed power-of-two ring sizes exist */
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			fallthrough;	/* otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	/* Hand the header RQ's backing pages to the port */
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	/* 0xFFFF means the port did not assign a valid queue id */
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		/* Dual-adapter mode: doorbell location comes back in the
		 * response and must be validated before mapping it.
		 */
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		/* Single-chute mode: use the fixed RQ doorbell register */
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* now create the data queue (same mailbox is rebuilt and reissued) */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		/* NVMET data buffers are a different size than FCP/ELS */
		if (subtype == LPFC_NVMET)
			rq_create->u.request.context.buffer_size =
				LPFC_NVMET_DATA_BUF_SIZE;
		else
			rq_create->u.request.context.buffer_size =
				LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			fallthrough;	/* otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		if (subtype == LPFC_NVMET)
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
		else
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	/* Hand the data RQ's backing pages to the port */
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
16656
/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates phba->cfg_nvmet_mrq header/data receive queue pairs
 * with a single non-embedded version-2 RQ_CREATE mailbox command. The page
 * lists of every queue are packed into one command payload, and the port
 * assigns consecutive queue ids (header, data, header, data, ...) starting
 * at the id returned in the response.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
 * and @drqp arrays supply the entry counts and page lists; each pair is bound
 * to the corresponding completion queue in @cqp. The command is issued in
 * MBX_POLL mode, so this function blocks until the mailbox completes.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Payload must hold the v2 command plus one DMA address per page
	 * for every header and data queue (2 * numrq queues total).
	 * Assumes all queues share hrqp[0]'s page_count.
	 */
	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	/* Non-embedded: payload lives in external DMA-able SGE memory */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}



	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		/* Header and data queues of a pair must be the same depth */
		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			/* The shared context is filled in once, from the
			 * first pair; it applies to all queues in the set.
			 */
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		/* Append this header queue's pages to the flat page array;
		 * rc counts pages so page_idx can advance past them.
		 */
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		/* Then the matching data queue's pages */
		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	/* rc now holds the first queue id of the consecutive block */
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	/* Non-embedded mailbox: free the command and its SGE pages */
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
16850
16851/**
James Smart4f774512009-05-22 14:52:35 -040016852 * lpfc_eq_destroy - Destroy an event Queue on the HBA
Lee Jones7af29d42020-07-21 17:41:31 +010016853 * @phba: HBA structure that indicates port to destroy a queue on.
James Smart4f774512009-05-22 14:52:35 -040016854 * @eq: The queue structure associated with the queue to destroy.
16855 *
16856 * This function destroys a queue, as detailed in @eq by sending an mailbox
16857 * command, specific to the type of queue, to the HBA.
16858 *
16859 * The @eq struct is used to get the queue ID of the queue to destroy.
16860 *
16861 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040016862 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040016863 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040016864int
James Smart4f774512009-05-22 14:52:35 -040016865lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16866{
16867 LPFC_MBOXQ_t *mbox;
16868 int rc, length, status = 0;
16869 uint32_t shdr_status, shdr_add_status;
16870 union lpfc_sli4_cfg_shdr *shdr;
16871
James Smart2e90f4b2011-12-13 13:22:37 -050016872 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040016873 if (!eq)
16874 return -ENODEV;
James Smart32517fc2019-01-28 11:14:33 -080016875
James Smart4f774512009-05-22 14:52:35 -040016876 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16877 if (!mbox)
16878 return -ENOMEM;
16879 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16880 sizeof(struct lpfc_sli4_cfg_mhdr));
16881 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16882 LPFC_MBOX_OPCODE_EQ_DESTROY,
16883 length, LPFC_SLI4_MBX_EMBED);
16884 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16885 eq->queue_id);
16886 mbox->vport = eq->phba->pport;
16887 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16888
16889 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16890 /* The IOCTL status is embedded in the mailbox subheader. */
16891 shdr = (union lpfc_sli4_cfg_shdr *)
16892 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16893 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16894 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16895 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016896 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040016897 "2505 EQ_DESTROY mailbox failed with "
16898 "status x%x add_status x%x, mbx status x%x\n",
16899 shdr_status, shdr_add_status, rc);
16900 status = -ENXIO;
16901 }
16902
16903 /* Remove eq from any list */
16904 list_del_init(&eq->list);
James Smart8fa38512009-07-19 10:01:03 -040016905 mempool_free(mbox, eq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040016906 return status;
16907}
16908
16909/**
16910 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
Lee Jones7af29d42020-07-21 17:41:31 +010016911 * @phba: HBA structure that indicates port to destroy a queue on.
James Smart4f774512009-05-22 14:52:35 -040016912 * @cq: The queue structure associated with the queue to destroy.
16913 *
16914 * This function destroys a queue, as detailed in @cq by sending an mailbox
16915 * command, specific to the type of queue, to the HBA.
16916 *
16917 * The @cq struct is used to get the queue ID of the queue to destroy.
16918 *
16919 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040016920 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040016921 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040016922int
James Smart4f774512009-05-22 14:52:35 -040016923lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16924{
16925 LPFC_MBOXQ_t *mbox;
16926 int rc, length, status = 0;
16927 uint32_t shdr_status, shdr_add_status;
16928 union lpfc_sli4_cfg_shdr *shdr;
16929
James Smart2e90f4b2011-12-13 13:22:37 -050016930 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040016931 if (!cq)
16932 return -ENODEV;
16933 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16934 if (!mbox)
16935 return -ENOMEM;
16936 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16937 sizeof(struct lpfc_sli4_cfg_mhdr));
16938 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16939 LPFC_MBOX_OPCODE_CQ_DESTROY,
16940 length, LPFC_SLI4_MBX_EMBED);
16941 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16942 cq->queue_id);
16943 mbox->vport = cq->phba->pport;
16944 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16945 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16946 /* The IOCTL status is embedded in the mailbox subheader. */
16947 shdr = (union lpfc_sli4_cfg_shdr *)
16948 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16949 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16950 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16951 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070016952 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040016953 "2506 CQ_DESTROY mailbox failed with "
16954 "status x%x add_status x%x, mbx status x%x\n",
16955 shdr_status, shdr_add_status, rc);
16956 status = -ENXIO;
16957 }
16958 /* Remove cq from any list */
16959 list_del_init(&cq->list);
James Smart8fa38512009-07-19 10:01:03 -040016960 mempool_free(mbox, cq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040016961 return status;
16962}
16963
16964/**
James Smart04c68492009-05-22 14:52:52 -040016965 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
Lee Jones7af29d42020-07-21 17:41:31 +010016966 * @phba: HBA structure that indicates port to destroy a queue on.
16967 * @mq: The queue structure associated with the queue to destroy.
James Smart04c68492009-05-22 14:52:52 -040016968 *
16969 * This function destroys a queue, as detailed in @mq by sending an mailbox
16970 * command, specific to the type of queue, to the HBA.
16971 *
16972 * The @mq struct is used to get the queue ID of the queue to destroy.
16973 *
16974 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040016975 * command fails this function will return -ENXIO.
James Smart04c68492009-05-22 14:52:52 -040016976 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	/* Mailbox command memory comes from the owning HBA's mempool. */
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common SLI4 config header. */
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	/* Identify which MQ the firmware should tear down. */
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	/* Poll for completion so the result can be checked inline. */
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* Either subheader status or a mailbox-level error means failure. */
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
17018
17019/**
James Smart4f774512009-05-22 14:52:35 -040017020 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
Lee Jones7af29d42020-07-21 17:41:31 +010017021 * @phba: HBA structure that indicates port to destroy a queue on.
James Smart4f774512009-05-22 14:52:35 -040017022 * @wq: The queue structure associated with the queue to destroy.
17023 *
17024 * This function destroys a queue, as detailed in @wq by sending an mailbox
17025 * command, specific to the type of queue, to the HBA.
17026 *
17027 * The @wq struct is used to get the queue ID of the queue to destroy.
17028 *
17029 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040017030 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040017031 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040017032int
James Smart4f774512009-05-22 14:52:35 -040017033lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17034{
17035 LPFC_MBOXQ_t *mbox;
17036 int rc, length, status = 0;
17037 uint32_t shdr_status, shdr_add_status;
17038 union lpfc_sli4_cfg_shdr *shdr;
17039
James Smart2e90f4b2011-12-13 13:22:37 -050017040 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040017041 if (!wq)
17042 return -ENODEV;
17043 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17044 if (!mbox)
17045 return -ENOMEM;
17046 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17047 sizeof(struct lpfc_sli4_cfg_mhdr));
17048 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17049 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17050 length, LPFC_SLI4_MBX_EMBED);
17051 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17052 wq->queue_id);
17053 mbox->vport = wq->phba->pport;
17054 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17055 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17056 shdr = (union lpfc_sli4_cfg_shdr *)
17057 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17058 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17059 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17060 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070017061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040017062 "2508 WQ_DESTROY mailbox failed with "
17063 "status x%x add_status x%x, mbx status x%x\n",
17064 shdr_status, shdr_add_status, rc);
17065 status = -ENXIO;
17066 }
17067 /* Remove wq from any list */
17068 list_del_init(&wq->list);
James Smartd1f525a2017-04-21 16:04:55 -070017069 kfree(wq->pring);
17070 wq->pring = NULL;
James Smart8fa38512009-07-19 10:01:03 -040017071 mempool_free(mbox, wq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040017072 return status;
17073}
17074
17075/**
17076 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
Lee Jones7af29d42020-07-21 17:41:31 +010017077 * @phba: HBA structure that indicates port to destroy a queue on.
17078 * @hrq: The queue structure associated with the queue to destroy.
17079 * @drq: The queue structure associated with the queue to destroy.
James Smart4f774512009-05-22 14:52:35 -040017080 *
17081 * This function destroys a queue, as detailed in @rq by sending an mailbox
17082 * command, specific to the type of queue, to the HBA.
17083 *
17084 * The @rq struct is used to get the queue ID of the queue to destroy.
17085 *
17086 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040017087 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040017088 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common SLI4 config header. */
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	/* First command: destroy the header receive queue (hrq). */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* Free the mailbox on this early-return path (no leak). */
		mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	/* Second command: reuse the same mailbox, swapping in the data
	 * receive queue (drq) id before re-issuing.
	 */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Unlink both queues regardless of the drq command outcome. */
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
17146
17147/**
17148 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17149 * @phba: The virtual port for which this call being executed.
17150 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17151 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17152 * @xritag: the xritag that ties this io to the SGL pages.
17153 *
17154 * This routine will post the sgl pages for the IO that has the xritag
17155 * that is in the iocbq structure. The xritag is assigned during iocbq
17156 * creation and persists for as long as the driver is loaded.
17157 * if the caller has fewer than 256 scatter gather segments to map then
17158 * pdma_phys_addr1 should be 0.
17159 * If the caller needs to map more than 256 scatter gather segment then
17160 * pdma_phys_addr1 should be a valid physical address.
17161 * physical address for SGLs must be 64 byte aligned.
17162 * If you are going to map 2 SGL's then the first one must have 256 entries
17163 * the second sgl can have between 1 and 256 entries.
17164 *
17165 * Return codes:
17166 * 0 - Success
17167 * -ENXIO, -ENOMEM - Failure
17168 **/
17169int
17170lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17171 dma_addr_t pdma_phys_addr0,
17172 dma_addr_t pdma_phys_addr1,
17173 uint16_t xritag)
17174{
17175 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17176 LPFC_MBOXQ_t *mbox;
17177 int rc;
17178 uint32_t shdr_status, shdr_add_status;
James Smart6d368e52011-05-24 11:44:12 -040017179 uint32_t mbox_tmo;
James Smart4f774512009-05-22 14:52:35 -040017180 union lpfc_sli4_cfg_shdr *shdr;
17181
17182 if (xritag == NO_XRI) {
Dick Kennedy372c1872020-06-30 14:50:00 -070017183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040017184 "0364 Invalid param:\n");
17185 return -EINVAL;
17186 }
17187
17188 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17189 if (!mbox)
17190 return -ENOMEM;
17191
17192 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17193 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17194 sizeof(struct lpfc_mbx_post_sgl_pages) -
James Smartfedd3b72011-02-16 12:39:24 -050017195 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
James Smart4f774512009-05-22 14:52:35 -040017196
17197 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17198 &mbox->u.mqe.un.post_sgl_pages;
17199 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17200 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17201
17202 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17203 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17204 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17205 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17206
17207 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17208 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17209 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17210 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17211 if (!phba->sli4_hba.intr_enable)
17212 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smart6d368e52011-05-24 11:44:12 -040017213 else {
James Smarta183a152011-10-10 21:32:43 -040017214 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -040017215 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17216 }
James Smart4f774512009-05-22 14:52:35 -040017217 /* The IOCTL status is embedded in the mailbox subheader. */
17218 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17219 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17220 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
James Smart304ee432021-04-11 18:31:17 -070017221 if (!phba->sli4_hba.intr_enable)
17222 mempool_free(mbox, phba->mbox_mem_pool);
17223 else if (rc != MBX_TIMEOUT)
James Smart4f774512009-05-22 14:52:35 -040017224 mempool_free(mbox, phba->mbox_mem_pool);
17225 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070017226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart4f774512009-05-22 14:52:35 -040017227 "2511 POST_SGL mailbox failed with "
17228 "status x%x add_status x%x, mbx status x%x\n",
17229 shdr_status, shdr_add_status, rc);
James Smart4f774512009-05-22 14:52:35 -040017230 }
17231 return 0;
17232}
James Smart4f774512009-05-22 14:52:35 -040017233
17234/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next unused logical xri from
 * the driver's xri bitmask, consistent with the SLI-4 interface spec.
 * The bitmask scan and the xri_used accounting are performed under the
 * hbalock.
 *
 * Returns
 * An available logical xri in the range 0 <= xri < max_xri if successful
 * NO_XRI if no xris are available.
17246 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017247static uint16_t
James Smart6d368e52011-05-24 11:44:12 -040017248lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17249{
17250 unsigned long xri;
17251
17252 /*
17253 * Fetch the next logical xri. Because this index is logical,
17254 * the driver starts at 0 each time.
17255 */
17256 spin_lock_irq(&phba->hbalock);
17257 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
17258 phba->sli4_hba.max_cfg_param.max_xri, 0);
17259 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17260 spin_unlock_irq(&phba->hbalock);
17261 return NO_XRI;
17262 } else {
17263 set_bit(xri, phba->sli4_hba.xri_bmask);
17264 phba->sli4_hba.max_cfg_param.xri_used++;
James Smart6d368e52011-05-24 11:44:12 -040017265 }
James Smart6d368e52011-05-24 11:44:12 -040017266 spin_unlock_irq(&phba->hbalock);
17267 return xri;
17268}
17269
17270/**
Lee Jones8514e2f2021-03-03 14:46:18 +000017271 * __lpfc_sli4_free_xri - Release an xri for reuse.
James Smart6d368e52011-05-24 11:44:12 -040017272 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010017273 * @xri: xri to release.
James Smart6d368e52011-05-24 11:44:12 -040017274 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
17277 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017278static void
James Smart6d368e52011-05-24 11:44:12 -040017279__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17280{
17281 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
James Smart6d368e52011-05-24 11:44:12 -040017282 phba->sli4_hba.max_cfg_param.xri_used--;
17283 }
17284}
17285
17286/**
17287 * lpfc_sli4_free_xri - Release an xri for reuse.
17288 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010017289 * @xri: xri to release.
James Smart6d368e52011-05-24 11:44:12 -040017290 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
17293 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	/* Serialize bitmask/counter updates with other hbalock users. */
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
17301
17302/**
James Smart4f774512009-05-22 14:52:35 -040017303 * lpfc_sli4_next_xritag - Get an xritag for the io
17304 * @phba: Pointer to HBA context object.
17305 *
17306 * This function gets an xritag for the iocb. If there is no unused xritag
17307 * it will return 0xffff.
17308 * The function returns the allocated xritag if successful, else returns zero.
17309 * Zero is not a valid xritag.
17310 * The caller is not required to hold any lock.
17311 **/
17312uint16_t
17313lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17314{
James Smart6d368e52011-05-24 11:44:12 -040017315 uint16_t xri_index;
James Smart4f774512009-05-22 14:52:35 -040017316
James Smart6d368e52011-05-24 11:44:12 -040017317 xri_index = lpfc_sli4_alloc_xri(phba);
James Smart81378052012-05-09 21:17:37 -040017318 if (xri_index == NO_XRI)
17319 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17320 "2004 Failed to allocate XRI.last XRITAG is %d"
17321 " Max XRI is %d, Used XRI is %d\n",
17322 xri_index,
17323 phba->sli4_hba.max_cfg_param.max_xri,
17324 phba->sli4_hba.max_cfg_param.xri_used);
17325 return xri_index;
James Smart4f774512009-05-22 14:52:35 -040017326}
17327
17328/**
James Smart895427b2017-02-12 13:52:30 -080017329 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
James Smart4f774512009-05-22 14:52:35 -040017330 * @phba: pointer to lpfc hba data structure.
James Smart8a9d2e82012-05-09 21:16:12 -040017331 * @post_sgl_list: pointer to els sgl entry list.
Lee Jones7af29d42020-07-21 17:41:31 +010017332 * @post_cnt: number of els sgl entries on the list.
James Smart4f774512009-05-22 14:52:35 -040017333 *
17334 * This routine is invoked to post a block of driver's sgl pages to the
17335 * HBA using non-embedded mailbox command. No Lock is held. This routine
17336 * is only called when the driver is loading and after all IO has been
17337 * stopped.
17338 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* One page pair per sgl, plus the config header and word0. */
	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry; ELS sgls use a single page, so the
		 * second page address of each pair is 0.
		 */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	/* Poll when interrupts are not yet enabled, else wait with timeout. */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* NOTE(review): on MBX_TIMEOUT the mailbox is deliberately not
	 * freed here; presumably the deferred completion path owns it.
	 */
	if (!phba->sli4_hba.intr_enable)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	else if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
17433
17434/**
James Smart5e5b5112019-01-28 11:14:22 -080017435 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
James Smart4f774512009-05-22 14:52:35 -040017436 * @phba: pointer to lpfc hba data structure.
James Smart0794d602019-01-28 11:14:19 -080017437 * @nblist: pointer to nvme buffer list.
James Smart4f774512009-05-22 14:52:35 -040017438 * @count: number of scsi buffers on the list.
17439 *
17440 * This routine is invoked to post a block of @count scsi sgl pages from a
James Smart0794d602019-01-28 11:14:19 -080017441 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
James Smart4f774512009-05-22 14:52:35 -040017442 * No Lock is held.
17443 *
17444 **/
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		/* A second SGL page is posted only when the configured DMA
		 * buffer size spills past one page; otherwise address 0.
		 */
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
				SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	/* Poll when interrupts are not yet enabled, else wait with timeout. */
	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* NOTE(review): on MBX_TIMEOUT the mailbox is deliberately not
	 * freed here; presumably the deferred completion path owns it.
	 */
	if (!phba->sli4_hba.intr_enable)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	else if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
17548
/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contains contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_nblist passed in
 * must be local list, thus no lock is needed when manipulate the list.
 *
 * Returns: -EINVAL if @sb_count is not positive, otherwise the number of
 * buffers successfully posted (0 means no buffer could be posted).
 **/
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
						phba, lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* Post error. Buffer unavailable. */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* Post success. Buffer available. */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* Post error. Mark buffer unavailable. */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* Post success, Mark buffer available. */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}
17677
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

/* Locally defined R_CTL value accepted for MDS Diagnostic frames */
#define FC_RCTL_MDS_DIAGS	0xF4

	/* First pass: accept only known R_CTL values */
	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:	/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
	case FC_RCTL_BA_RMC: 	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		/* Recurse on the real header that follows the VFT header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

	/* Second pass: accept only FC-4 types the driver supports */
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
17763
17764/**
17765 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17766 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17767 *
17768 * This function processes the FC header to retrieve the VFI from the VF
17769 * header, if one exists. This function will return the VFI if one exists
17770 * or 0 if no VSAN Header exists.
17771 **/
17772static uint32_t
17773lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17774{
17775 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17776
17777 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17778 return 0;
17779 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17780}
17781
17782/**
17783 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17784 * @phba: Pointer to the HBA structure to search for the vport on
17785 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17786 * @fcfi: The FC Fabric ID that the frame came from
Lee Jones7af29d42020-07-21 17:41:31 +010017787 * @did: Destination ID to match against
James Smart4f774512009-05-22 14:52:35 -040017788 *
17789 * This function searches the @phba for a vport that matches the content of the
17790 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17791 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17792 * returns the matching vport pointer or NULL if unable to match frame to a
17793 * vport.
17794 **/
17795static struct lpfc_vport *
17796lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
James Smart895427b2017-02-12 13:52:30 -080017797 uint16_t fcfi, uint32_t did)
James Smart4f774512009-05-22 14:52:35 -040017798{
17799 struct lpfc_vport **vports;
17800 struct lpfc_vport *vport = NULL;
17801 int i;
James Smart939723a2012-05-09 21:19:03 -040017802
James Smartbf086112011-08-21 21:48:13 -040017803 if (did == Fabric_DID)
17804 return phba->pport;
James Smart939723a2012-05-09 21:19:03 -040017805 if ((phba->pport->fc_flag & FC_PT2PT) &&
17806 !(phba->link_state == LPFC_HBA_READY))
17807 return phba->pport;
17808
James Smart4f774512009-05-22 14:52:35 -040017809 vports = lpfc_create_vport_work_array(phba);
James Smart895427b2017-02-12 13:52:30 -080017810 if (vports != NULL) {
James Smart4f774512009-05-22 14:52:35 -040017811 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17812 if (phba->fcf.fcfi == fcfi &&
17813 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17814 vports[i]->fc_myDID == did) {
17815 vport = vports[i];
17816 break;
17817 }
17818 }
James Smart895427b2017-02-12 13:52:30 -080017819 }
James Smart4f774512009-05-22 14:52:35 -040017820 lpfc_destroy_vport_work_array(phba, vports);
17821 return vport;
17822}
17823
17824/**
James Smart45ed1192009-10-02 15:17:02 -040017825 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17826 * @vport: The vport to work on.
17827 *
17828 * This function updates the receive sequence time stamp for this vport. The
17829 * receive sequence time stamp indicates the time that the last frame of the
17830 * the sequence that has been idle for the longest amount of time was received.
17831 * the driver uses this time stamp to indicate if any received sequences have
17832 * timed out.
17833 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017834static void
James Smart45ed1192009-10-02 15:17:02 -040017835lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17836{
17837 struct lpfc_dmabuf *h_buf;
17838 struct hbq_dmabuf *dmabuf = NULL;
17839
17840 /* get the oldest sequence on the rcv list */
17841 h_buf = list_get_first(&vport->rcv_buffer_list,
17842 struct lpfc_dmabuf, list);
17843 if (!h_buf)
17844 return;
17845 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17846 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17847}
17848
17849/**
17850 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17851 * @vport: The vport that the received sequences were sent to.
17852 *
17853 * This function cleans up all outstanding received sequences. This is called
17854 * by the driver when a link event or user action invalidates all the received
17855 * sequences.
17856 **/
17857void
17858lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17859{
17860 struct lpfc_dmabuf *h_buf, *hnext;
17861 struct lpfc_dmabuf *d_buf, *dnext;
17862 struct hbq_dmabuf *dmabuf = NULL;
17863
17864 /* start with the oldest sequence on the rcv list */
17865 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17866 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17867 list_del_init(&dmabuf->hbuf.list);
17868 list_for_each_entry_safe(d_buf, dnext,
17869 &dmabuf->dbuf.list, list) {
17870 list_del_init(&d_buf->list);
17871 lpfc_in_buf_free(vport->phba, d_buf);
17872 }
17873 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17874 }
17875}
17876
17877/**
17878 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17879 * @vport: The vport that the received sequences were sent to.
17880 *
17881 * This function determines whether any received sequences have timed out by
17882 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17883 * indicates that there is at least one timed out sequence this routine will
17884 * go through the received sequences one at a time from most inactive to most
17885 * active to determine which ones need to be cleaned up. Once it has determined
17886 * that a sequence needs to be cleaned up it will simply free up the resources
17887 * without sending an abort.
17888 **/
17889void
17890lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17891{
17892 struct lpfc_dmabuf *h_buf, *hnext;
17893 struct lpfc_dmabuf *d_buf, *dnext;
17894 struct hbq_dmabuf *dmabuf = NULL;
17895 unsigned long timeout;
17896 int abort_count = 0;
17897
17898 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17899 vport->rcv_buffer_time_stamp);
17900 if (list_empty(&vport->rcv_buffer_list) ||
17901 time_before(jiffies, timeout))
17902 return;
17903 /* start with the oldest sequence on the rcv list */
17904 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17905 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17906 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17907 dmabuf->time_stamp);
17908 if (time_before(jiffies, timeout))
17909 break;
17910 abort_count++;
17911 list_del_init(&dmabuf->hbuf.list);
17912 list_for_each_entry_safe(d_buf, dnext,
17913 &dmabuf->dbuf.list, list) {
17914 list_del_init(&d_buf->list);
17915 lpfc_in_buf_free(vport->phba, d_buf);
17916 }
17917 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17918 }
17919 if (abort_count)
17920 lpfc_update_rcv_time_stamp(vport);
17921}
17922
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
 * This function returns a pointer to the first dmabuf in the sequence list that
 * the frame was linked to, or NULL if the frame could not be placed.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		/* A sequence matches on SEQ_ID, OX_ID and 3-byte S_ID */
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
	    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		/*
		 * New frame precedes the current sequence head: it becomes
		 * the new head and the old head is linked behind it.
		 */
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	/* Walk backwards from the highest SEQ_CNT already queued */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
		    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		/* Reached the list head: no insertion point was found */
		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
18013
18014/**
James Smart6669f9b2009-10-02 15:16:45 -040018015 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18016 * @vport: pointer to a vitural port
18017 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18018 *
18019 * This function tries to abort from the partially assembed sequence, described
18020 * by the information from basic abbort @dmabuf. It checks to see whether such
18021 * partially assembled sequence held by the driver. If so, it shall free up all
18022 * the frames from the partially assembled sequence.
18023 *
18024 * Return
18025 * true -- if there is matching partially assembled sequence present and all
18026 * the frames freed with the sequence;
18027 * false -- if there is no matching partially assembled sequence present so
18028 * nothing got aborted in the lower layer driver
18029 **/
18030static bool
18031lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18032 struct hbq_dmabuf *dmabuf)
18033{
18034 struct fc_frame_header *new_hdr;
18035 struct fc_frame_header *temp_hdr;
18036 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18037 struct hbq_dmabuf *seq_dmabuf = NULL;
18038
18039 /* Use the hdr_buf to find the sequence that matches this frame */
18040 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18041 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18042 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18043 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18044 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18045 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18046 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18047 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18048 continue;
18049 /* found a pending sequence that matches this frame */
18050 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18051 break;
18052 }
18053
18054 /* Free up all the frames from the partially assembled sequence */
18055 if (seq_dmabuf) {
18056 list_for_each_entry_safe(d_buf, n_buf,
18057 &seq_dmabuf->dbuf.list, list) {
18058 list_del_init(&d_buf->list);
18059 lpfc_in_buf_free(vport->phba, d_buf);
18060 }
18061 return true;
18062 }
18063 return false;
18064}
18065
18066/**
James Smart6dd9e312013-01-03 15:43:37 -050018067 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18068 * @vport: pointer to a vitural port
18069 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18070 *
18071 * This function tries to abort from the assembed sequence from upper level
18072 * protocol, described by the information from basic abbort @dmabuf. It
18073 * checks to see whether such pending context exists at upper level protocol.
18074 * If so, it shall clean up the pending context.
18075 *
18076 * Return
18077 * true -- if there is matching pending context of the sequence cleaned
18078 * at ulp;
18079 * false -- if there is no matching pending context of the sequence present
18080 * at ulp.
18081 **/
18082static bool
18083lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18084{
18085 struct lpfc_hba *phba = vport->phba;
18086 int handled;
18087
18088 /* Accepting abort at ulp with SLI4 only */
18089 if (phba->sli_rev < LPFC_SLI_REV4)
18090 return false;
18091
18092 /* Register all caring upper level protocols to attend abort */
18093 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18094 if (handled)
18095 return true;
18096
18097 return false;
18098}
18099
18100/**
James Smart546fc852011-03-11 16:06:29 -050018101 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
James Smart6669f9b2009-10-02 15:16:45 -040018102 * @phba: Pointer to HBA context object.
18103 * @cmd_iocbq: pointer to the command iocbq structure.
18104 * @rsp_iocbq: pointer to the response iocbq structure.
18105 *
James Smart546fc852011-03-11 16:06:29 -050018106 * This function handles the sequence abort response iocb command complete
James Smart6669f9b2009-10-02 15:16:45 -040018107 * event. It properly releases the memory allocated to the sequence abort
18108 * accept iocb.
18109 **/
18110static void
James Smart546fc852011-03-11 16:06:29 -050018111lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
James Smart6669f9b2009-10-02 15:16:45 -040018112 struct lpfc_iocbq *cmd_iocbq,
18113 struct lpfc_iocbq *rsp_iocbq)
18114{
James Smart6dd9e312013-01-03 15:43:37 -050018115 struct lpfc_nodelist *ndlp;
18116
18117 if (cmd_iocbq) {
18118 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18119 lpfc_nlp_put(ndlp);
James Smart6669f9b2009-10-02 15:16:45 -040018120 lpfc_sli_release_iocbq(phba, cmd_iocbq);
James Smart6dd9e312013-01-03 15:43:37 -050018121 }
James Smart6b5151f2012-01-18 16:24:06 -050018122
18123 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
18124 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
Dick Kennedy372c1872020-06-30 14:50:00 -070018125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6b5151f2012-01-18 16:24:06 -050018126 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18127 rsp_iocbq->iocb.ulpStatus,
18128 rsp_iocbq->iocb.un.ulpWord[4]);
James Smart6669f9b2009-10-02 15:16:45 -040018129}
18130
18131/**
James Smart6d368e52011-05-24 11:44:12 -040018132 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18133 * @phba: Pointer to HBA context object.
18134 * @xri: xri id in transaction.
18135 *
18136 * This function validates the xri maps to the known range of XRIs allocated an
18137 * used by the driver.
18138 **/
James Smart7851fe22011-07-22 18:36:52 -040018139uint16_t
James Smart6d368e52011-05-24 11:44:12 -040018140lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18141 uint16_t xri)
18142{
James Smarta2fc4aef2014-09-03 12:57:55 -040018143 uint16_t i;
James Smart6d368e52011-05-24 11:44:12 -040018144
18145 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18146 if (xri == phba->sli4_hba.xri_ids[i])
18147 return i;
18148 }
18149 return NO_XRI;
18150}
18151
/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: was the partially assembled receive sequence successfully aborted
 *
 * This function sends a basic response (BA_ACC or BA_RJT) to a previous
 * unsolicited sequence abort event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	/* No response can be transmitted while the link is down */
	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		/* No node for this S_ID yet; create one for the response */
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	/* Default to a BA_ACC response; overridden to BA_RJT below if needed */
	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = lpfc_nlp_get(ndlp);
	if (!ctiocb->context1) {
		/* Could not take a node reference; abandon the response */
		lpfc_sli_release_iocbq(phba, ctiocb);
		return;
	}

	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
			(xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		/* Issue failed: undo the node reference and free the iocbq */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
18293
18294/**
18295 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18296 * @vport: Pointer to the vport on which this sequence was received
18297 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18298 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status for the
 * unsolicited sequence as aborted. After that, it will issue a basic
18305 * accept to accept the abort.
18306 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released.  The
	 * response path below only needs the header, not the payload buffer.
	 */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup.
		 * First try to flush a partially assembled sequence; if none
		 * was pending, ask the ULP layer to abort its copy.
		 */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	/* The received ABTS frame buffer is no longer needed either way. */
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		/* NVMET target mode handles the unsolicited abort itself;
		 * no BLS response is generated here.
		 */
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
18339
18340/**
James Smart4f774512009-05-22 14:52:35 -040018341 * lpfc_seq_complete - Indicates if a sequence is complete
18342 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18343 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
18351 **/
18352static int
18353lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18354{
18355 struct fc_frame_header *hdr;
18356 struct lpfc_dmabuf *d_buf;
18357 struct hbq_dmabuf *seq_dmabuf;
18358 uint32_t fctl;
18359 int seq_count = 0;
18360
18361 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18362 /* make sure first fame of sequence has a sequence count of zero */
18363 if (hdr->fh_seq_cnt != seq_count)
18364 return 0;
18365 fctl = (hdr->fh_f_ctl[0] << 16 |
18366 hdr->fh_f_ctl[1] << 8 |
18367 hdr->fh_f_ctl[2]);
18368 /* If last frame of sequence we can return success. */
18369 if (fctl & FC_FC_END_SEQ)
18370 return 1;
18371 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18372 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18373 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18374 /* If there is a hole in the sequence count then fail. */
James Smarteeead812009-12-21 17:01:23 -050018375 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
James Smart4f774512009-05-22 14:52:35 -040018376 return 0;
18377 fctl = (hdr->fh_f_ctl[0] << 16 |
18378 hdr->fh_f_ctl[1] << 8 |
18379 hdr->fh_f_ctl[2]);
18380 /* If last frame of sequence we can return success. */
18381 if (fctl & FC_FC_END_SEQ)
18382 return 1;
18383 }
18384 return 0;
18385}
18386
18387/**
18388 * lpfc_prep_seq - Prep sequence for ULP processing
18389 * @vport: Pointer to the vport on which this sequence was received
Lee Jones7af29d42020-07-21 17:41:31 +010018390 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
James Smart4f774512009-05-22 14:52:35 -040018391 *
18392 * This function takes a sequence, described by a list of frames, and creates
18393 * a list of iocbq structures to describe the sequence. This iocbq list will be
18394 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
18398 * allocate any iocbqs (including the first) this function will return NULL.
18399 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;	/* per-frame length and accumulated length */
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing.
		 * ELS frames carry the D_ID in parmRo so NPIV routing works.
		 */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption.  Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq; the frame length
		 * comes from the receive CQE, capped at one buffer's worth.
		 */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			/* No iocbq to describe this frame - drop the buffer */
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			/* Current iocbq has a free second-buffer slot */
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			/* Second buffer's BDE lives in sli3Words[4] */
			pbde = (struct ulp_bde64 *)
				&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			/* Current iocbq is full - start a new one */
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				/* Mark the whole sequence as failed so the
				 * ULP sees a resource error, and drop the
				 * frame that could not be described.
				 */
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
						IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	/* Free the sequence's header buffer */
	if (!first_iocbq)
		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);

	return first_iocbq;
}
18521
James Smart6669f9b2009-10-02 15:16:45 -040018522static void
18523lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18524 struct hbq_dmabuf *seq_dmabuf)
18525{
18526 struct fc_frame_header *fc_hdr;
18527 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18528 struct lpfc_hba *phba = vport->phba;
18529
18530 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18531 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18532 if (!iocbq) {
Dick Kennedy372c1872020-06-30 14:50:00 -070018533 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6669f9b2009-10-02 15:16:45 -040018534 "2707 Ring %d handler: Failed to allocate "
18535 "iocb Rctl x%x Type x%x received\n",
18536 LPFC_ELS_RING,
18537 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18538 return;
18539 }
18540 if (!lpfc_complete_unsol_iocb(phba,
James Smart895427b2017-02-12 13:52:30 -080018541 phba->sli4_hba.els_wq->pring,
James Smart6669f9b2009-10-02 15:16:45 -040018542 iocbq, fc_hdr->fh_r_ctl,
18543 fc_hdr->fh_type))
Dick Kennedy372c1872020-06-30 14:50:00 -070018544 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6669f9b2009-10-02 15:16:45 -040018545 "2540 Ring %d handler: unexpected Rctl "
18546 "x%x Type x%x received\n",
18547 LPFC_ELS_RING,
18548 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18549
18550 /* Free iocb created in lpfc_prep_seq */
18551 list_for_each_entry_safe(curr_iocb, next_iocb,
18552 &iocbq->list, list) {
18553 list_del_init(&curr_iocb->list);
18554 lpfc_sli_release_iocbq(phba, curr_iocb);
18555 }
18556 lpfc_sli_release_iocbq(phba, iocbq);
18557}
18558
James Smartae9e28f2017-05-15 15:20:51 -070018559static void
18560lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18561 struct lpfc_iocbq *rspiocb)
18562{
18563 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18564
18565 if (pcmd && pcmd->virt)
Romain Perier771db5c2017-07-06 10:13:05 +020018566 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
James Smartae9e28f2017-05-15 15:20:51 -070018567 kfree(pcmd);
18568 lpfc_sli_release_iocbq(phba, cmdiocb);
James Smarte817e5d2018-12-13 15:17:53 -080018569 lpfc_drain_txq(phba);
James Smartae9e28f2017-05-15 15:20:51 -070018570}
18571
/* Echo a received MDS diagnostic frame back onto the wire.  The payload is
 * copied into a freshly allocated DMA buffer and transmitted with a
 * SEND_FRAME command; lpfc_sli4_mds_loopback_cmpl() frees the resources on
 * completion.
 */
static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union lpfc_wqe *wqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;
	unsigned long iflags;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it;
		 * the frame is retried later rather than dropped here.
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&dmabuf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_worker_wake_up(phba);
		return;
	}

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* copyin the payload */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	/* fill in BDE's for command */
	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;

	/* pcmd ownership passes to the completion handler via context2 */
	iocbq->context2 = pcmd;
	iocbq->vport = vport;
	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;

	/*
	 * Setup rest of the iocb as though it were a WQE
	 * Build the SEND_FRAME WQE
	 */
	wqe = (union lpfc_wqe *)&iocbq->iocb;

	/* Copy the six words of the received FC header into the WQE */
	wqe->send_frame.frame_len = frame_len;
	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));

	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
	iocbq->iocb.ulpLe = 1;
	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
	if (rc == IOCB_ERROR)
		goto exit;

	/* Success: the received buffer itself is no longer needed */
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	/* NOTE(review): log id "2023" is also used by an info message in
	 * lpfc_sli4_handle_received_buffer - confirm ids should be unique.
	 */
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n");
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
18659
James Smart4f774512009-05-22 14:52:35 -040018660/**
18661 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18662 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +010018663 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
James Smart4f774512009-05-22 14:52:35 -040018664 *
18665 * This function is called with no lock held. This function processes all
18666 * the received buffers and gives it to upper layers when a received buffer
18667 * indicates that it is the final frame in the sequence. The interrupt
James Smart895427b2017-02-12 13:52:30 -080018668 * service routine processes received buffers at interrupt contexts.
James Smart4f774512009-05-22 14:52:35 -040018669 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18670 * appropriate receive function when the final frame in a sequence is received.
18671 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* MDS diagnostic frames are looped back unless the port is
	 * unloading, in which case they are simply dropped.
	 */
	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
		vport = phba->pport;
		/* Handle MDS Loopback frames */
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		else
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* Extract the FCF index from the CQE; the field location depends
	 * on the CQE version.
	 */
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* NOTE(review): if FC_RCTL_MDS_DIAGS equals 0xF4, this branch is
	 * unreachable - the check at the top of the function already
	 * diverts such frames.  Confirm against the R_CTL definitions.
	 */
	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
		vport = phba->pport;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2023 MDS Loopback %d bytes\n",
				bf_get(lpfc_rcqe_length,
				       &dmabuf->cq_event.cqe.rcqe_cmpl));
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame into the per-vport pending sequence list */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
James Smart6fb120a2009-05-22 14:52:59 -040018766
18767/**
18768 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18769 * @phba: pointer to lpfc hba data structure.
18770 *
18771 * This routine is invoked to post rpi header templates to the
18772 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040018773 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18774 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040018775 *
18776 * This routine does not require any locks. It's usage is expected
18777 * to be driver load or reset recovery when the driver is
18778 * sequential.
18779 *
18780 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020018781 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040018782 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040018783 * When this error occurs, the driver is not guaranteed
18784 * to have any rpi regions posted to the device and
18785 * must either attempt to repost the regions or take a
18786 * fatal error.
18787 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources.  A port reset only
		 * needs the headers posted.
		 *
		 * NOTE(review): lrpi is never advanced in this loop, so
		 * every header page gets rpi_ids[0] as its start_rpi -
		 * confirm whether that is intended.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

	/* NOTE(review): a failed posting still reaches this label, so the
	 * RSRC_RDY flag is set even on error - confirm intended.
	 */
 exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}
18826
18827/**
18828 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18829 * @phba: pointer to lpfc hba data structure.
18830 * @rpi_page: pointer to the rpi memory region.
18831 *
18832 * This routine is invoked to post a single rpi header to the
18833 * HBA consistent with the SLI-4 interface spec. This memory region
18834 * maps up to 64 rpi context regions.
18835 *
18836 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020018837 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040018838 * -ENOMEM - No available memory
18839 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040018840 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);


	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	/* Issue synchronously (MBX_POLL) and decode the config shdr status */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else {
		/*
		 * The next_rpi stores the next logical module-64 rpi value used
		 * to post physical rpis in subsequent rpi postings.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
18904
18905/**
18906 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18907 * @phba: pointer to lpfc hba data structure.
18908 *
18909 * This routine is invoked to post rpi header templates to the
18910 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040018911 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18912 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040018913 *
18914 * Returns
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020018915 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
James Smart6fb120a2009-05-22 14:52:59 -040018916 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18917 **/
18918int
18919lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18920{
James Smart6d368e52011-05-24 11:44:12 -040018921 unsigned long rpi;
18922 uint16_t max_rpi, rpi_limit;
18923 uint16_t rpi_remaining, lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040018924 struct lpfc_rpi_hdr *rpi_hdr;
James Smart4902b382013-10-10 12:20:35 -040018925 unsigned long iflag;
James Smart6fb120a2009-05-22 14:52:59 -040018926
James Smart6fb120a2009-05-22 14:52:59 -040018927 /*
James Smart6d368e52011-05-24 11:44:12 -040018928 * Fetch the next logical rpi. Because this index is logical,
18929 * the driver starts at 0 each time.
James Smart6fb120a2009-05-22 14:52:59 -040018930 */
James Smart4902b382013-10-10 12:20:35 -040018931 spin_lock_irqsave(&phba->hbalock, iflag);
James Smartbe6bb942015-04-07 15:07:22 -040018932 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18933 rpi_limit = phba->sli4_hba.next_rpi;
18934
James Smart6d368e52011-05-24 11:44:12 -040018935 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18936 if (rpi >= rpi_limit)
James Smart6fb120a2009-05-22 14:52:59 -040018937 rpi = LPFC_RPI_ALLOC_ERROR;
18938 else {
18939 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18940 phba->sli4_hba.max_cfg_param.rpi_used++;
18941 phba->sli4_hba.rpi_count++;
18942 }
James Smart0f154222019-09-21 20:58:52 -070018943 lpfc_printf_log(phba, KERN_INFO,
18944 LOG_NODE | LOG_DISCOVERY,
18945 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
James Smartbe6bb942015-04-07 15:07:22 -040018946 (int) rpi, max_rpi, rpi_limit);
James Smart6fb120a2009-05-22 14:52:59 -040018947
18948 /*
18949 * Don't try to allocate more rpi header regions if the device limit
James Smart6d368e52011-05-24 11:44:12 -040018950 * has been exhausted.
James Smart6fb120a2009-05-22 14:52:59 -040018951 */
18952 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18953 (phba->sli4_hba.rpi_count >= max_rpi)) {
James Smart4902b382013-10-10 12:20:35 -040018954 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6fb120a2009-05-22 14:52:59 -040018955 return rpi;
18956 }
18957
18958 /*
James Smart6d368e52011-05-24 11:44:12 -040018959 * RPI header postings are not required for SLI4 ports capable of
18960 * extents.
18961 */
18962 if (!phba->sli4_hba.rpi_hdrs_in_use) {
James Smart4902b382013-10-10 12:20:35 -040018963 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6d368e52011-05-24 11:44:12 -040018964 return rpi;
18965 }
18966
18967 /*
James Smart6fb120a2009-05-22 14:52:59 -040018968 * If the driver is running low on rpi resources, allocate another
18969 * page now. Note that the next_rpi value is used because
18970 * it represents how many are actually in use whereas max_rpi notes
18971 * how many are supported max by the device.
18972 */
James Smart6d368e52011-05-24 11:44:12 -040018973 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
James Smart4902b382013-10-10 12:20:35 -040018974 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6fb120a2009-05-22 14:52:59 -040018975 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18976 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18977 if (!rpi_hdr) {
Dick Kennedy372c1872020-06-30 14:50:00 -070018978 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040018979 "2002 Error Could not grow rpi "
18980 "count\n");
18981 } else {
James Smart6d368e52011-05-24 11:44:12 -040018982 lrpi = rpi_hdr->start_rpi;
18983 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
James Smart6fb120a2009-05-22 14:52:59 -040018984 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18985 }
18986 }
18987
18988 return rpi;
18989}
18990
18991/**
Lee Jones8514e2f2021-03-03 14:46:18 +000018992 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
James Smart6fb120a2009-05-22 14:52:59 -040018993 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010018994 * @rpi: rpi to free
James Smart6fb120a2009-05-22 14:52:59 -040018995 *
18996 * This routine is invoked to release an rpi to the pool of
18997 * available rpis maintained by the driver.
18998 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040018999static void
James Smartd7c47992010-06-08 18:31:54 -040019000__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19001{
James Smart7cfd5632019-11-04 16:56:58 -080019002 /*
19003 * if the rpi value indicates a prior unreg has already
19004 * been done, skip the unreg.
19005 */
19006 if (rpi == LPFC_RPI_ALLOC_ERROR)
19007 return;
19008
James Smartd7c47992010-06-08 18:31:54 -040019009 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19010 phba->sli4_hba.rpi_count--;
19011 phba->sli4_hba.max_cfg_param.rpi_used--;
James Smartb95b2112019-08-14 16:56:47 -070019012 } else {
James Smart0f154222019-09-21 20:58:52 -070019013 lpfc_printf_log(phba, KERN_INFO,
19014 LOG_NODE | LOG_DISCOVERY,
James Smartb95b2112019-08-14 16:56:47 -070019015 "2016 rpi %x not inuse\n",
19016 rpi);
James Smartd7c47992010-06-08 18:31:54 -040019017 }
19018}
19019
19020/**
19021 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19022 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010019023 * @rpi: rpi to free
James Smartd7c47992010-06-08 18:31:54 -040019024 *
19025 * This routine is invoked to release an rpi to the pool of
19026 * available rpis maintained by the driver.
19027 **/
19028void
James Smart6fb120a2009-05-22 14:52:59 -040019029lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19030{
19031 spin_lock_irq(&phba->hbalock);
James Smartd7c47992010-06-08 18:31:54 -040019032 __lpfc_sli4_free_rpi(phba, rpi);
James Smart6fb120a2009-05-22 14:52:59 -040019033 spin_unlock_irq(&phba->hbalock);
19034}
19035
19036/**
19037 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19038 * @phba: pointer to lpfc hba data structure.
19039 *
19040 * This routine is invoked to remove the memory region that
19041 * provided rpi via a bitmask.
19042 **/
19043void
19044lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19045{
19046 kfree(phba->sli4_hba.rpi_bmask);
James Smart6d368e52011-05-24 11:44:12 -040019047 kfree(phba->sli4_hba.rpi_ids);
19048 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
James Smart6fb120a2009-05-22 14:52:59 -040019049}
19050
19051/**
19052 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
Lee Jones7af29d42020-07-21 17:41:31 +010019053 * @ndlp: pointer to lpfc nodelist data structure.
19054 * @cmpl: completion call-back.
19055 * @arg: data to load as MBox 'caller buffer information'
James Smart6fb120a2009-05-22 14:52:59 -040019056 *
19057 * This routine is invoked to remove the memory region that
19058 * provided rpi via a bitmask.
19059 **/
19060int
James Smart6b5151f2012-01-18 16:24:06 -050019061lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19062 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
James Smart6fb120a2009-05-22 14:52:59 -040019063{
19064 LPFC_MBOXQ_t *mboxq;
19065 struct lpfc_hba *phba = ndlp->phba;
19066 int rc;
19067
19068 /* The port is notified of the header region via a mailbox command. */
19069 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19070 if (!mboxq)
19071 return -ENOMEM;
19072
James Smart1037e4b2021-05-14 12:55:52 -070019073 /* If cmpl assigned, then this nlp_get pairs with
19074 * lpfc_mbx_cmpl_resume_rpi.
19075 *
19076 * Else cmpl is NULL, then this nlp_get pairs with
19077 * lpfc_sli_def_mbox_cmpl.
19078 */
19079 if (!lpfc_nlp_get(ndlp)) {
19080 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19081 "2122 %s: Failed to get nlp ref\n",
19082 __func__);
19083 mempool_free(mboxq, phba->mbox_mem_pool);
19084 return -EIO;
19085 }
19086
James Smart6fb120a2009-05-22 14:52:59 -040019087 /* Post all rpi memory regions to the port. */
19088 lpfc_resume_rpi(mboxq, ndlp);
James Smart6b5151f2012-01-18 16:24:06 -050019089 if (cmpl) {
19090 mboxq->mbox_cmpl = cmpl;
James Smart3e1f0712018-11-29 16:09:29 -080019091 mboxq->ctx_buf = arg;
James Smart72859902012-01-18 16:25:38 -050019092 } else
19093 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart1037e4b2021-05-14 12:55:52 -070019094 mboxq->ctx_ndlp = ndlp;
James Smart6b5151f2012-01-18 16:24:06 -050019095 mboxq->vport = ndlp->vport;
James Smart6fb120a2009-05-22 14:52:59 -040019096 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19097 if (rc == MBX_NOT_FINISHED) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040019099 "2010 Resume RPI Mailbox failed "
19100 "status %d, mbxStatus x%x\n", rc,
19101 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
James Smart1037e4b2021-05-14 12:55:52 -070019102 lpfc_nlp_put(ndlp);
James Smart6fb120a2009-05-22 14:52:59 -040019103 mempool_free(mboxq, phba->mbox_mem_pool);
19104 return -EIO;
19105 }
19106 return 0;
19107}
19108
19109/**
19110 * lpfc_sli4_init_vpi - Initialize a vpi with the port
James Smart76a95d72010-11-20 23:11:48 -050019111 * @vport: Pointer to the vport for which the vpi is being initialized
James Smart6fb120a2009-05-22 14:52:59 -040019112 *
James Smart76a95d72010-11-20 23:11:48 -050019113 * This routine is invoked to activate a vpi with the port.
James Smart6fb120a2009-05-22 14:52:59 -040019114 *
19115 * Returns:
19116 * 0 success
19117 * -Evalue otherwise
19118 **/
19119int
James Smart76a95d72010-11-20 23:11:48 -050019120lpfc_sli4_init_vpi(struct lpfc_vport *vport)
James Smart6fb120a2009-05-22 14:52:59 -040019121{
19122 LPFC_MBOXQ_t *mboxq;
19123 int rc = 0;
James Smart6a9c52c2009-10-02 15:16:51 -040019124 int retval = MBX_SUCCESS;
James Smart6fb120a2009-05-22 14:52:59 -040019125 uint32_t mbox_tmo;
James Smart76a95d72010-11-20 23:11:48 -050019126 struct lpfc_hba *phba = vport->phba;
James Smart6fb120a2009-05-22 14:52:59 -040019127 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19128 if (!mboxq)
19129 return -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -050019130 lpfc_init_vpi(phba, mboxq, vport->vpi);
James Smarta183a152011-10-10 21:32:43 -040019131 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
James Smart6fb120a2009-05-22 14:52:59 -040019132 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
James Smart6fb120a2009-05-22 14:52:59 -040019133 if (rc != MBX_SUCCESS) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019134 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040019135 "2022 INIT VPI Mailbox failed "
19136 "status %d, mbxStatus x%x\n", rc,
19137 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
James Smart6a9c52c2009-10-02 15:16:51 -040019138 retval = -EIO;
James Smart6fb120a2009-05-22 14:52:59 -040019139 }
James Smart6a9c52c2009-10-02 15:16:51 -040019140 if (rc != MBX_TIMEOUT)
James Smart76a95d72010-11-20 23:11:48 -050019141 mempool_free(mboxq, vport->phba->mbox_mem_pool);
James Smart6a9c52c2009-10-02 15:16:51 -040019142
19143 return retval;
James Smart6fb120a2009-05-22 14:52:59 -040019144}
19145
19146/**
19147 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19148 * @phba: pointer to lpfc hba data structure.
19149 * @mboxq: Pointer to mailbox object.
19150 *
19151 * This routine is invoked to manually add a single FCF record. The caller
19152 * must pass a completely initialized FCF_Record. This routine takes
19153 * care of the nonembedded mailbox operations.
19154 **/
19155static void
19156lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19157{
19158 void *virt_addr;
19159 union lpfc_sli4_cfg_shdr *shdr;
19160 uint32_t shdr_status, shdr_add_status;
19161
19162 virt_addr = mboxq->sge_array->addr[0];
19163 /* The IOCTL status is embedded in the mailbox subheader. */
19164 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19165 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19166 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19167
19168 if ((shdr_status || shdr_add_status) &&
19169 (shdr_status != STATUS_FCF_IN_USE))
Dick Kennedy372c1872020-06-30 14:50:00 -070019170 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040019171 "2558 ADD_FCF_RECORD mailbox failed with "
19172 "status x%x add_status x%x\n",
19173 shdr_status, shdr_add_status);
19174
19175 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19176}
19177
19178/**
19179 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19180 * @phba: pointer to lpfc hba data structure.
19181 * @fcf_record: pointer to the initialized fcf record to add.
19182 *
19183 * This routine is invoked to manually add a single FCF record. The caller
19184 * must pass a completely initialized FCF_Record. This routine takes
19185 * care of the nonembedded mailbox operations.
19186 **/
19187int
19188lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19189{
19190 int rc = 0;
19191 LPFC_MBOXQ_t *mboxq;
19192 uint8_t *bytep;
19193 void *virt_addr;
James Smart6fb120a2009-05-22 14:52:59 -040019194 struct lpfc_mbx_sge sge;
19195 uint32_t alloc_len, req_len;
19196 uint32_t fcfindex;
19197
19198 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19199 if (!mboxq) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040019201 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19202 return -ENOMEM;
19203 }
19204
19205 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19206 sizeof(uint32_t);
19207
19208 /* Allocate DMA memory and set up the non-embedded mailbox command */
19209 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19210 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19211 req_len, LPFC_SLI4_MBX_NEMBED);
19212 if (alloc_len < req_len) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040019214 "2523 Allocated DMA memory size (x%x) is "
19215 "less than the requested DMA memory "
19216 "size (x%x)\n", alloc_len, req_len);
19217 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19218 return -ENOMEM;
19219 }
19220
19221 /*
19222 * Get the first SGE entry from the non-embedded DMA memory. This
19223 * routine only uses a single SGE.
19224 */
19225 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
James Smart6fb120a2009-05-22 14:52:59 -040019226 virt_addr = mboxq->sge_array->addr[0];
19227 /*
19228 * Configure the FCF record for FCFI 0. This is the driver's
19229 * hardcoded default and gets used in nonFIP mode.
19230 */
19231 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19232 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19233 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19234
19235 /*
19236 * Copy the fcf_index and the FCF Record Data. The data starts after
19237 * the FCoE header plus word10. The data copy needs to be endian
19238 * correct.
19239 */
19240 bytep += sizeof(uint32_t);
19241 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19242 mboxq->vport = phba->pport;
19243 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19244 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19245 if (rc == MBX_NOT_FINISHED) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040019247 "2515 ADD_FCF_RECORD mailbox failed with "
19248 "status 0x%x\n", rc);
19249 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19250 rc = -EIO;
19251 } else
19252 rc = 0;
19253
19254 return rc;
19255}
19256
19257/**
19258 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19259 * @phba: pointer to lpfc hba data structure.
19260 * @fcf_record: pointer to the fcf record to write the default data.
19261 * @fcf_index: FCF table entry index.
19262 *
19263 * This routine is invoked to build the driver's default FCF record. The
19264 * values used are hardcoded. This routine handles memory initialization.
19265 *
19266 **/
19267void
19268lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19269 struct fcf_record *fcf_record,
19270 uint16_t fcf_index)
19271{
19272 memset(fcf_record, 0, sizeof(struct fcf_record));
19273 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19274 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19275 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19276 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19277 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19278 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19279 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19280 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19281 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19282 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19283 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19284 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19285 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
James Smart0c287582009-06-10 17:22:56 -040019286 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
James Smart6fb120a2009-05-22 14:52:59 -040019287 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19288 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19289 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19290 /* Set the VLAN bit map */
19291 if (phba->valid_vlan) {
19292 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19293 = 1 << (phba->vlan_id % 8);
19294 }
19295}
19296
19297/**
James Smart0c9ab6f2010-02-26 14:15:57 -050019298 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
James Smart6fb120a2009-05-22 14:52:59 -040019299 * @phba: pointer to lpfc hba data structure.
19300 * @fcf_index: FCF table entry offset.
19301 *
James Smart0c9ab6f2010-02-26 14:15:57 -050019302 * This routine is invoked to scan the entire FCF table by reading FCF
19303 * record and processing it one at a time starting from the @fcf_index
19304 * for initial FCF discovery or fast FCF failover rediscovery.
19305 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030019306 * Return 0 if the mailbox command is submitted successfully, none 0
James Smart0c9ab6f2010-02-26 14:15:57 -050019307 * otherwise.
James Smart6fb120a2009-05-22 14:52:59 -040019308 **/
19309int
James Smart0c9ab6f2010-02-26 14:15:57 -050019310lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
James Smart6fb120a2009-05-22 14:52:59 -040019311{
19312 int rc = 0, error;
19313 LPFC_MBOXQ_t *mboxq;
James Smart6fb120a2009-05-22 14:52:59 -040019314
James Smart32b97932009-07-19 10:01:21 -040019315 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
James Smart80c17842012-03-01 22:35:45 -050019316 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
James Smart6fb120a2009-05-22 14:52:59 -040019317 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19318 if (!mboxq) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6fb120a2009-05-22 14:52:59 -040019320 "2000 Failed to allocate mbox for "
19321 "READ_FCF cmd\n");
James Smart4d9ab992009-10-02 15:16:39 -040019322 error = -ENOMEM;
James Smart0c9ab6f2010-02-26 14:15:57 -050019323 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040019324 }
James Smartecfd03c2010-02-12 14:41:27 -050019325 /* Construct the read FCF record mailbox command */
James Smart0c9ab6f2010-02-26 14:15:57 -050019326 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
James Smartecfd03c2010-02-12 14:41:27 -050019327 if (rc) {
19328 error = -EINVAL;
James Smart0c9ab6f2010-02-26 14:15:57 -050019329 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040019330 }
James Smartecfd03c2010-02-12 14:41:27 -050019331 /* Issue the mailbox command asynchronously */
James Smart6fb120a2009-05-22 14:52:59 -040019332 mboxq->vport = phba->pport;
James Smart0c9ab6f2010-02-26 14:15:57 -050019333 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
James Smarta93ff372010-10-22 11:06:08 -040019334
19335 spin_lock_irq(&phba->hbalock);
19336 phba->hba_flag |= FCF_TS_INPROG;
19337 spin_unlock_irq(&phba->hbalock);
19338
James Smart6fb120a2009-05-22 14:52:59 -040019339 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
James Smartecfd03c2010-02-12 14:41:27 -050019340 if (rc == MBX_NOT_FINISHED)
James Smart6fb120a2009-05-22 14:52:59 -040019341 error = -EIO;
James Smartecfd03c2010-02-12 14:41:27 -050019342 else {
James Smart38b92ef2010-08-04 16:11:39 -040019343 /* Reset eligible FCF count for new scan */
19344 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
James Smart999d8132010-03-15 11:24:56 -040019345 phba->fcf.eligible_fcf_cnt = 0;
James Smart6fb120a2009-05-22 14:52:59 -040019346 error = 0;
James Smart32b97932009-07-19 10:01:21 -040019347 }
James Smart0c9ab6f2010-02-26 14:15:57 -050019348fail_fcf_scan:
James Smart4d9ab992009-10-02 15:16:39 -040019349 if (error) {
19350 if (mboxq)
19351 lpfc_sli4_mbox_cmd_free(phba, mboxq);
James Smarta93ff372010-10-22 11:06:08 -040019352 /* FCF scan failed, clear FCF_TS_INPROG flag */
James Smart4d9ab992009-10-02 15:16:39 -040019353 spin_lock_irq(&phba->hbalock);
James Smarta93ff372010-10-22 11:06:08 -040019354 phba->hba_flag &= ~FCF_TS_INPROG;
James Smart4d9ab992009-10-02 15:16:39 -040019355 spin_unlock_irq(&phba->hbalock);
19356 }
James Smart6fb120a2009-05-22 14:52:59 -040019357 return error;
19358}
James Smarta0c87cb2009-07-19 10:01:10 -040019359
19360/**
James Smarta93ff372010-10-22 11:06:08 -040019361 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
James Smart0c9ab6f2010-02-26 14:15:57 -050019362 * @phba: pointer to lpfc hba data structure.
19363 * @fcf_index: FCF table entry offset.
19364 *
19365 * This routine is invoked to read an FCF record indicated by @fcf_index
James Smarta93ff372010-10-22 11:06:08 -040019366 * and to use it for FLOGI roundrobin FCF failover.
James Smart0c9ab6f2010-02-26 14:15:57 -050019367 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030019368 * Return 0 if the mailbox command is submitted successfully, none 0
James Smart0c9ab6f2010-02-26 14:15:57 -050019369 * otherwise.
19370 **/
19371int
19372lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19373{
19374 int rc = 0, error;
19375 LPFC_MBOXQ_t *mboxq;
19376
19377 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19378 if (!mboxq) {
19379 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19380 "2763 Failed to allocate mbox for "
19381 "READ_FCF cmd\n");
19382 error = -ENOMEM;
19383 goto fail_fcf_read;
19384 }
19385 /* Construct the read FCF record mailbox command */
19386 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19387 if (rc) {
19388 error = -EINVAL;
19389 goto fail_fcf_read;
19390 }
19391 /* Issue the mailbox command asynchronously */
19392 mboxq->vport = phba->pport;
19393 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19394 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19395 if (rc == MBX_NOT_FINISHED)
19396 error = -EIO;
19397 else
19398 error = 0;
19399
19400fail_fcf_read:
19401 if (error && mboxq)
19402 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19403 return error;
19404}
19405
19406/**
19407 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19408 * @phba: pointer to lpfc hba data structure.
19409 * @fcf_index: FCF table entry offset.
19410 *
19411 * This routine is invoked to read an FCF record indicated by @fcf_index to
James Smarta93ff372010-10-22 11:06:08 -040019412 * determine whether it's eligible for FLOGI roundrobin failover list.
James Smart0c9ab6f2010-02-26 14:15:57 -050019413 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030019414 * Return 0 if the mailbox command is submitted successfully, none 0
James Smart0c9ab6f2010-02-26 14:15:57 -050019415 * otherwise.
19416 **/
19417int
19418lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19419{
19420 int rc = 0, error;
19421 LPFC_MBOXQ_t *mboxq;
19422
19423 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19424 if (!mboxq) {
19425 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19426 "2758 Failed to allocate mbox for "
19427 "READ_FCF cmd\n");
19428 error = -ENOMEM;
19429 goto fail_fcf_read;
19430 }
19431 /* Construct the read FCF record mailbox command */
19432 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19433 if (rc) {
19434 error = -EINVAL;
19435 goto fail_fcf_read;
19436 }
19437 /* Issue the mailbox command asynchronously */
19438 mboxq->vport = phba->pport;
19439 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19440 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19441 if (rc == MBX_NOT_FINISHED)
19442 error = -EIO;
19443 else
19444 error = 0;
19445
19446fail_fcf_read:
19447 if (error && mboxq)
19448 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19449 return error;
19450}
19451
19452/**
James Smartf5cb5302015-12-16 18:11:52 -050019453 * lpfc_check_next_fcf_pri_level
Lee Jones7af29d42020-07-21 17:41:31 +010019454 * @phba: pointer to the lpfc_hba struct for this port.
James Smart7d791df2011-07-22 18:37:52 -040019455 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19456 * routine when the rr_bmask is empty. The FCF indecies are put into the
19457 * rr_bmask based on their priority level. Starting from the highest priority
19458 * to the lowest. The most likely FCF candidate will be in the highest
19459 * priority group. When this routine is called it searches the fcf_pri list for
19460 * next lowest priority group and repopulates the rr_bmask with only those
19461 * fcf_indexes.
19462 * returns:
19463 * 1=success 0=failure
19464 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040019465static int
James Smart7d791df2011-07-22 18:37:52 -040019466lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19467{
19468 uint16_t next_fcf_pri;
19469 uint16_t last_index;
19470 struct lpfc_fcf_pri *fcf_pri;
19471 int rc;
19472 int ret = 0;
19473
19474 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19475 LPFC_SLI4_FCF_TBL_INDX_MAX);
19476 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19477 "3060 Last IDX %d\n", last_index);
James Smart25626692013-03-01 16:36:54 -050019478
19479 /* Verify the priority list has 2 or more entries */
19480 spin_lock_irq(&phba->hbalock);
19481 if (list_empty(&phba->fcf.fcf_pri_list) ||
19482 list_is_singular(&phba->fcf.fcf_pri_list)) {
19483 spin_unlock_irq(&phba->hbalock);
James Smart7d791df2011-07-22 18:37:52 -040019484 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19485 "3061 Last IDX %d\n", last_index);
19486 return 0; /* Empty rr list */
19487 }
James Smart25626692013-03-01 16:36:54 -050019488 spin_unlock_irq(&phba->hbalock);
19489
James Smart7d791df2011-07-22 18:37:52 -040019490 next_fcf_pri = 0;
19491 /*
19492 * Clear the rr_bmask and set all of the bits that are at this
19493 * priority.
19494 */
19495 memset(phba->fcf.fcf_rr_bmask, 0,
19496 sizeof(*phba->fcf.fcf_rr_bmask));
19497 spin_lock_irq(&phba->hbalock);
19498 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19499 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19500 continue;
19501 /*
19502 * the 1st priority that has not FLOGI failed
19503 * will be the highest.
19504 */
19505 if (!next_fcf_pri)
19506 next_fcf_pri = fcf_pri->fcf_rec.priority;
19507 spin_unlock_irq(&phba->hbalock);
19508 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19509 rc = lpfc_sli4_fcf_rr_index_set(phba,
19510 fcf_pri->fcf_rec.fcf_index);
19511 if (rc)
19512 return 0;
19513 }
19514 spin_lock_irq(&phba->hbalock);
19515 }
19516 /*
19517 * if next_fcf_pri was not set above and the list is not empty then
19518 * we have failed flogis on all of them. So reset flogi failed
Anatol Pomozov4907cb72012-09-01 10:31:09 -070019519 * and start at the beginning.
James Smart7d791df2011-07-22 18:37:52 -040019520 */
19521 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19522 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19523 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19524 /*
19525 * the 1st priority that has not FLOGI failed
19526 * will be the highest.
19527 */
19528 if (!next_fcf_pri)
19529 next_fcf_pri = fcf_pri->fcf_rec.priority;
19530 spin_unlock_irq(&phba->hbalock);
19531 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19532 rc = lpfc_sli4_fcf_rr_index_set(phba,
19533 fcf_pri->fcf_rec.fcf_index);
19534 if (rc)
19535 return 0;
19536 }
19537 spin_lock_irq(&phba->hbalock);
19538 }
19539 } else
19540 ret = 1;
19541 spin_unlock_irq(&phba->hbalock);
19542
19543 return ret;
19544}
19545/**
James Smart0c9ab6f2010-02-26 14:15:57 -050019546 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19547 * @phba: pointer to lpfc hba data structure.
19548 *
19549 * This routine is to get the next eligible FCF record index in a round
19550 * robin fashion. If the next eligible FCF record index equals to the
James Smarta93ff372010-10-22 11:06:08 -040019551 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
James Smart0c9ab6f2010-02-26 14:15:57 -050019552 * shall be returned, otherwise, the next eligible FCF record's index
19553 * shall be returned.
19554 **/
19555uint16_t
19556lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19557{
19558 uint16_t next_fcf_index;
19559
James Smart421c6622013-01-03 15:44:16 -050019560initial_priority:
James Smart3804dc82010-07-14 15:31:37 -040019561 /* Search start from next bit of currently registered FCF index */
James Smart421c6622013-01-03 15:44:16 -050019562 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19563
James Smart7d791df2011-07-22 18:37:52 -040019564next_priority:
James Smart421c6622013-01-03 15:44:16 -050019565 /* Determine the next fcf index to check */
19566 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
James Smart0c9ab6f2010-02-26 14:15:57 -050019567 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19568 LPFC_SLI4_FCF_TBL_INDX_MAX,
James Smart3804dc82010-07-14 15:31:37 -040019569 next_fcf_index);
19570
James Smart0c9ab6f2010-02-26 14:15:57 -050019571 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
James Smart7d791df2011-07-22 18:37:52 -040019572 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19573 /*
19574 * If we have wrapped then we need to clear the bits that
19575 * have been tested so that we can detect when we should
19576 * change the priority level.
19577 */
James Smart0c9ab6f2010-02-26 14:15:57 -050019578 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19579 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
James Smart7d791df2011-07-22 18:37:52 -040019580 }
19581
James Smart0c9ab6f2010-02-26 14:15:57 -050019582
James Smart3804dc82010-07-14 15:31:37 -040019583 /* Check roundrobin failover list empty condition */
James Smart7d791df2011-07-22 18:37:52 -040019584 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19585 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19586 /*
19587 * If next fcf index is not found check if there are lower
19588 * Priority level fcf's in the fcf_priority list.
19589 * Set up the rr_bmask with all of the avaiable fcf bits
19590 * at that level and continue the selection process.
19591 */
19592 if (lpfc_check_next_fcf_pri_level(phba))
James Smart421c6622013-01-03 15:44:16 -050019593 goto initial_priority;
James Smart3804dc82010-07-14 15:31:37 -040019594 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19595 "2844 No roundrobin failover FCF available\n");
James Smart036cad12018-10-23 13:41:06 -070019596
19597 return LPFC_FCOE_FCF_NEXT_NONE;
James Smart3804dc82010-07-14 15:31:37 -040019598 }
19599
James Smart7d791df2011-07-22 18:37:52 -040019600 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19601 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
James Smartf5cb5302015-12-16 18:11:52 -050019602 LPFC_FCF_FLOGI_FAILED) {
19603 if (list_is_singular(&phba->fcf.fcf_pri_list))
19604 return LPFC_FCOE_FCF_NEXT_NONE;
19605
James Smart7d791df2011-07-22 18:37:52 -040019606 goto next_priority;
James Smartf5cb5302015-12-16 18:11:52 -050019607 }
James Smart7d791df2011-07-22 18:37:52 -040019608
James Smart3804dc82010-07-14 15:31:37 -040019609 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019610 "2845 Get next roundrobin failover FCF (x%x)\n",
19611 next_fcf_index);
19612
James Smart0c9ab6f2010-02-26 14:15:57 -050019613 return next_fcf_index;
19614}
19615
19616/**
19617 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19618 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010019619 * @fcf_index: index into the FCF table to 'set'
James Smart0c9ab6f2010-02-26 14:15:57 -050019620 *
19621 * This routine sets the FCF record index in to the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040019622 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050019623 * does not go beyond the range of the driver allocated bmask dimension
19624 * before setting the bit.
19625 *
19626 * Returns 0 if the index bit successfully set, otherwise, it returns
19627 * -EINVAL.
19628 **/
19629int
19630lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19631{
19632 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19633 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019634 "2610 FCF (x%x) reached driver's book "
19635 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050019636 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19637 return -EINVAL;
19638 }
19639 /* Set the eligible FCF record index bmask */
19640 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19641
James Smart3804dc82010-07-14 15:31:37 -040019642 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019643 "2790 Set FCF (x%x) to roundrobin FCF failover "
James Smart3804dc82010-07-14 15:31:37 -040019644 "bmask\n", fcf_index);
19645
James Smart0c9ab6f2010-02-26 14:15:57 -050019646 return 0;
19647}
19648
19649/**
James Smart3804dc82010-07-14 15:31:37 -040019650 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
James Smart0c9ab6f2010-02-26 14:15:57 -050019651 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010019652 * @fcf_index: index into the FCF table to 'clear'
James Smart0c9ab6f2010-02-26 14:15:57 -050019653 *
19654 * This routine clears the FCF record index from the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040019655 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050019656 * does not go beyond the range of the driver allocated bmask dimension
19657 * before clearing the bit.
19658 **/
19659void
19660lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19661{
James Smart9a803a72013-09-06 12:17:56 -040019662 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
James Smart0c9ab6f2010-02-26 14:15:57 -050019663 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019665 "2762 FCF (x%x) reached driver's book "
19666 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050019667 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19668 return;
19669 }
19670 /* Clear the eligible FCF record index bmask */
James Smart7d791df2011-07-22 18:37:52 -040019671 spin_lock_irq(&phba->hbalock);
James Smart9a803a72013-09-06 12:17:56 -040019672 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19673 list) {
James Smart7d791df2011-07-22 18:37:52 -040019674 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19675 list_del_init(&fcf_pri->list);
19676 break;
19677 }
19678 }
19679 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050019680 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
James Smart3804dc82010-07-14 15:31:37 -040019681
19682 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040019683 "2791 Clear FCF (x%x) from roundrobin failover "
James Smart3804dc82010-07-14 15:31:37 -040019684 "bmask\n", fcf_index);
James Smart0c9ab6f2010-02-26 14:15:57 -050019685}
19686
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 *
 * On failure it falls back depending on which event requested rediscovery:
 * a CVL-triggered request retries the currently registered FCF entry, while
 * a DEAD-FCF-triggered request fails through as a link down. On success it
 * starts the FCF rediscovery quiescent wait timer. The mailbox is always
 * freed here, as this is the asynchronous completion owner of @mbox.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	/* Request status is embedded in the mailbox subheader */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		/* NOTE(review): fcf_flag is tested here without hbalock;
		 * only the flag clear below is done under the lock.
		 */
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	/* Completion path owns the mailbox; release it back to the pool */
	mempool_free(mbox, phba->mbox_mem_pool);
}
19745
19746/**
James Smart3804dc82010-07-14 15:31:37 -040019747 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
James Smartecfd03c2010-02-12 14:41:27 -050019748 * @phba: pointer to lpfc hba data structure.
19749 *
19750 * This routine is invoked to request for rediscovery of the entire FCF table
19751 * by the port.
19752 **/
19753int
19754lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19755{
19756 LPFC_MBOXQ_t *mbox;
19757 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19758 int rc, length;
19759
James Smart0c9ab6f2010-02-26 14:15:57 -050019760 /* Cancel retry delay timers to all vports before FCF rediscover */
19761 lpfc_cancel_all_vport_retry_delay_timer(phba);
19762
James Smartecfd03c2010-02-12 14:41:27 -050019763 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19764 if (!mbox) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019765 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartecfd03c2010-02-12 14:41:27 -050019766 "2745 Failed to allocate mbox for "
19767 "requesting FCF rediscover.\n");
19768 return -ENOMEM;
19769 }
19770
19771 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19772 sizeof(struct lpfc_sli4_cfg_mhdr));
19773 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19774 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19775 length, LPFC_SLI4_MBX_EMBED);
19776
19777 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19778 /* Set count to 0 for invalidating the entire FCF database */
19779 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19780
19781 /* Issue the mailbox command asynchronously */
19782 mbox->vport = phba->pport;
19783 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19784 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19785
19786 if (rc == MBX_NOT_FINISHED) {
19787 mempool_free(mbox, phba->mbox_mem_pool);
19788 return -EIO;
19789 }
19790 return 0;
19791}
19792
19793/**
James Smartfc2b9892010-02-26 14:15:29 -050019794 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19795 * @phba: pointer to lpfc hba data structure.
19796 *
19797 * This function is the failover routine as a last resort to the FCF DEAD
19798 * event when driver failed to perform fast FCF failover.
19799 **/
19800void
19801lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19802{
19803 uint32_t link_state;
19804
19805 /*
19806 * Last resort as FCF DEAD event failover will treat this as
19807 * a link down, but save the link state because we don't want
19808 * it to be changed to Link Down unless it is already down.
19809 */
19810 link_state = phba->link_state;
19811 lpfc_linkdown(phba);
19812 phba->link_state = link_state;
19813
19814 /* Unregister FCF if no devices connected to it */
19815 lpfc_unregister_unused_fcf(phba);
19816}
19817
/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 *
 * The dump is fetched iteratively with polled DMP_REGION_23 mailbox
 * commands, copying each chunk into @rgn23_data until the adapter reports
 * no more data or the DMP_RGN23_SIZE buffer limit is reached. The caller
 * must supply a buffer of at least DMP_RGN23_SIZE bytes.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	/* Nothing to do without a destination buffer */
	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	/* Pull region 23 one dump-chunk at a time */
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			/* Force loop exit below on mailbox failure */
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		/* Clamp so the copy never overruns the caller's buffer */
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		/* NOTE(review): word_cnt is used directly as a byte count
		 * for the copy and offset — presumably already converted
		 * by the dump command; confirm against lpfc_dump_mem.
		 */
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	/* Total number of bytes copied into rgn23_data */
	return offset;
}
19876
/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 *
 * Unlike the SLI3 variant, the whole region is fetched with one polled
 * dump command into a DMA buffer attached to the mailbox (ctx_buf); the
 * reported length (mb_words[5]) is validated against DMP_RGN23_SIZE before
 * copying into @rgn23_data. The DMA buffer is freed here on all paths.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	/* Nothing to do without a destination buffer */
	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	/* Build the dump command; it allocates and attaches the DMA buf */
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	/* Adapter reports the dump length in mailbox word 5 */
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	/* Reject lengths that would overrun the caller's buffer */
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	/* Free the mailbox and, if it was attached, the DMA buffer */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
19928
19929/**
19930 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19931 * @phba: pointer to lpfc hba data structure.
19932 *
19933 * This function read region 23 and parse TLV for port status to
19934 * decide if the user disaled the port. If the TLV indicates the
19935 * port is disabled, the hba_flag is set accordingly.
19936 **/
19937void
19938lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19939{
19940 uint8_t *rgn23_data = NULL;
19941 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19942 uint32_t offset = 0;
19943
19944 /* Get adapter Region 23 data */
19945 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19946 if (!rgn23_data)
19947 goto out;
19948
19949 if (phba->sli_rev < LPFC_SLI_REV4)
19950 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19951 else {
19952 if_type = bf_get(lpfc_sli_intf_if_type,
19953 &phba->sli4_hba.sli_intf);
19954 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19955 goto out;
19956 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19957 }
James Smarta0c87cb2009-07-19 10:01:10 -040019958
19959 if (!data_size)
19960 goto out;
19961
19962 /* Check the region signature first */
19963 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019964 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarta0c87cb2009-07-19 10:01:10 -040019965 "2619 Config region 23 has bad signature\n");
19966 goto out;
19967 }
19968 offset += 4;
19969
19970 /* Check the data structure version */
19971 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
Dick Kennedy372c1872020-06-30 14:50:00 -070019972 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarta0c87cb2009-07-19 10:01:10 -040019973 "2620 Config region 23 has bad version\n");
19974 goto out;
19975 }
19976 offset += 4;
19977
19978 /* Parse TLV entries in the region */
19979 while (offset < data_size) {
19980 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19981 break;
19982 /*
19983 * If the TLV is not driver specific TLV or driver id is
19984 * not linux driver id, skip the record.
19985 */
19986 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19987 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19988 (rgn23_data[offset + 3] != 0)) {
19989 offset += rgn23_data[offset + 1] * 4 + 4;
19990 continue;
19991 }
19992
19993 /* Driver found a driver specific TLV in the config region */
19994 sub_tlv_len = rgn23_data[offset + 1] * 4;
19995 offset += 4;
19996 tlv_offset = 0;
19997
19998 /*
19999 * Search for configured port state sub-TLV.
20000 */
20001 while ((offset < data_size) &&
20002 (tlv_offset < sub_tlv_len)) {
20003 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20004 offset += 4;
20005 tlv_offset += 4;
20006 break;
20007 }
20008 if (rgn23_data[offset] != PORT_STE_TYPE) {
20009 offset += rgn23_data[offset + 1] * 4 + 4;
20010 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20011 continue;
20012 }
20013
20014 /* This HBA contains PORT_STE configured */
20015 if (!rgn23_data[offset + 2])
20016 phba->hba_flag |= LINK_DISABLED;
20017
20018 goto out;
20019 }
20020 }
James Smart026abb82011-12-13 13:20:45 -050020021
James Smarta0c87cb2009-07-19 10:01:10 -040020022out:
James Smarta0c87cb2009-07-19 10:01:10 -040020023 kfree(rgn23_data);
20024 return;
20025}
James Smart695a8142010-01-26 23:08:03 -050020026
20027/**
James Smart16a93e82021-07-07 11:43:34 -070020028 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20029 * @phba: pointer to lpfc hba data structure
20030 * @shdr_status: wr_object rsp's status field
20031 * @shdr_add_status: wr_object rsp's add_status field
20032 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20033 * @shdr_change_status: wr_object rsp's change_status field
20034 * @shdr_csf: wr_object rsp's csf bit
20035 *
20036 * This routine is intended to be called after a firmware write completes.
20037 * It will log next action items to be performed by the user to instantiate
20038 * the newly downloaded firmware or reason for incompatibility.
20039 **/
20040static void
20041lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20042 u32 shdr_add_status, u32 shdr_add_status_2,
20043 u32 shdr_change_status, u32 shdr_csf)
20044{
20045 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20046 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20047 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20048 "change_status x%02x, csf %01x\n", __func__,
20049 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20050 shdr_status, shdr_add_status, shdr_add_status_2,
20051 shdr_change_status, shdr_csf);
20052
20053 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20054 switch (shdr_add_status_2) {
20055 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20056 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20057 "4199 Firmware write failed: "
20058 "image incompatible with flash x%02x\n",
20059 phba->sli4_hba.flash_id);
20060 break;
20061 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20062 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20063 "4200 Firmware write failed: "
20064 "image incompatible with ASIC "
20065 "architecture x%02x\n",
20066 phba->sli4_hba.asic_rev);
20067 break;
20068 default:
20069 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20070 "4210 Firmware write failed: "
20071 "add_status_2 x%02x\n",
20072 shdr_add_status_2);
20073 break;
20074 }
20075 } else if (!shdr_status && !shdr_add_status) {
20076 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20077 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20078 if (shdr_csf)
20079 shdr_change_status =
20080 LPFC_CHANGE_STATUS_PCI_RESET;
20081 }
20082
20083 switch (shdr_change_status) {
20084 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20085 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20086 "3198 Firmware write complete: System "
20087 "reboot required to instantiate\n");
20088 break;
20089 case (LPFC_CHANGE_STATUS_FW_RESET):
20090 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20091 "3199 Firmware write complete: "
20092 "Firmware reset required to "
20093 "instantiate\n");
20094 break;
20095 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20096 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20097 "3200 Firmware write complete: Port "
20098 "Migration or PCI Reset required to "
20099 "instantiate\n");
20100 break;
20101 case (LPFC_CHANGE_STATUS_PCI_RESET):
20102 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20103 "3201 Firmware write complete: PCI "
20104 "Reset required to instantiate\n");
20105 break;
20106 default:
20107 break;
20108 }
20109 }
20110}
20111
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * the mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs that the imbedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 is successful and offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
	uint32_t shdr_change_status = 0, shdr_csf = 0;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	/* Object name "/" selects the default firmware object */
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	/* Build BDEs from the dmabuf list, up to the embedded mailbox limit
	 * or until @size bytes have been described.
	 */
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			/* Final BDE: partial page, mark end-of-file and
			 * end-of-stream and arrange to read change_status.
			 */
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	/* Poll when interrupts are disabled, otherwise wait for completion */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
				   &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);
		shdr_csf = bf_get(lpfc_wr_object_csf,
				  &wr_object->u.response);
	}

	/* On MBX_TIMEOUT with interrupts enabled the mailbox may still
	 * complete later, so it must not be freed here.
	 */
	if (!phba->sli4_hba.intr_enable)
		mempool_free(mbox, phba->mbox_mem_pool);
	else if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, add_status_2 x%x, "
				"mbx status x%x\n",
				shdr_status, shdr_add_status, shdr_add_status_2,
				rc);
		rc = -ENXIO;
		/* On error, *offset carries add_status for the caller */
		*offset = shdr_add_status;
	} else {
		*offset += wr_object->u.response.actual_write_length;
	}

	/* Log user guidance after errors or after the final chunk */
	if (rc || check_change_status)
		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
				       shdr_add_status_2, shdr_change_status,
				       shdr_csf);
	return rc;
}
20225
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterate through the mailboxq and clean up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when driver want to restart discovery of the vport due to
 * a Clear Virtual Link event.
 *
 * Three queues are swept: the pending mboxq (entries moved to a private
 * list and freed at the end), the single active mailbox (marked for
 * immediate unreg on completion), and the completed-but-unprocessed
 * mboxq_cmpl list (completion handler replaced with the default one).
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		/* Only REG_LOGIN64 and REG_VPI are discovery mailboxes */
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		/* Defer freeing until after the lock is dropped */
		list_move_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				/* hbalock must be dropped to take the node
				 * lock; restart the list walk afterwards
				 * since the list may have changed.
				 */
				spin_unlock_irq(&phba->hbalock);
				spin_lock(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(&ndlp->lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			/* Free the DMA buffer attached to REG_LOGIN64 */
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mb->ctx_buf = NULL;
			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(&ndlp->lock);
				/* Drop the reference held by the mailbox */
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(&act_mbx_ndlp->lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(&act_mbx_ndlp->lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
20336
James Smart2a9bf3d2010-06-07 15:24:45 -040020337/**
20338 * lpfc_drain_txq - Drain the txq
20339 * @phba: Pointer to HBA context object.
20340 *
20341 * This function attempt to submit IOCBs on the txq
20342 * to the adapter. For SLI4 adapters, the txq contains
20343 * ELS IOCBs that have been deferred because the there
20344 * are no SGLs. This congestion can occur with large
20345 * vport counts during node discovery.
20346 **/
20347
20348uint32_t
20349lpfc_drain_txq(struct lpfc_hba *phba)
20350{
20351 LIST_HEAD(completions);
James Smart895427b2017-02-12 13:52:30 -080020352 struct lpfc_sli_ring *pring;
Daeseok Youn2e706372014-02-21 09:03:32 +090020353 struct lpfc_iocbq *piocbq = NULL;
James Smart2a9bf3d2010-06-07 15:24:45 -040020354 unsigned long iflags = 0;
20355 char *fail_msg = NULL;
20356 struct lpfc_sglq *sglq;
James Smart205e8242018-03-05 12:04:03 -080020357 union lpfc_wqe128 wqe;
James Smarta2fc4aef2014-09-03 12:57:55 -040020358 uint32_t txq_cnt = 0;
James Smartdc19e3b2018-05-24 21:08:57 -070020359 struct lpfc_queue *wq;
James Smart2a9bf3d2010-06-07 15:24:45 -040020360
James Smartdc19e3b2018-05-24 21:08:57 -070020361 if (phba->link_flag & LS_MDS_LOOPBACK) {
20362 /* MDS WQE are posted only to first WQ*/
James Smartc00f62e2019-08-14 16:57:11 -070020363 wq = phba->sli4_hba.hdwq[0].io_wq;
James Smartdc19e3b2018-05-24 21:08:57 -070020364 if (unlikely(!wq))
20365 return 0;
20366 pring = wq->pring;
20367 } else {
20368 wq = phba->sli4_hba.els_wq;
20369 if (unlikely(!wq))
20370 return 0;
20371 pring = lpfc_phba_elsring(phba);
20372 }
20373
20374 if (unlikely(!pring) || list_empty(&pring->txq))
Dick Kennedy1234a6d2017-09-29 17:34:29 -070020375 return 0;
James Smart895427b2017-02-12 13:52:30 -080020376
James Smart398d81c2013-05-31 17:04:19 -040020377 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart0e9bb8d2013-03-01 16:35:12 -050020378 list_for_each_entry(piocbq, &pring->txq, list) {
20379 txq_cnt++;
20380 }
20381
20382 if (txq_cnt > pring->txq_max)
20383 pring->txq_max = txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040020384
James Smart398d81c2013-05-31 17:04:19 -040020385 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040020386
James Smart0e9bb8d2013-03-01 16:35:12 -050020387 while (!list_empty(&pring->txq)) {
James Smart398d81c2013-05-31 17:04:19 -040020388 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040020389
James Smart19ca7602010-11-20 23:11:55 -050020390 piocbq = lpfc_sli_ringtx_get(phba, pring);
James Smarta6298522012-06-12 13:54:11 -040020391 if (!piocbq) {
James Smart398d81c2013-05-31 17:04:19 -040020392 spin_unlock_irqrestore(&pring->ring_lock, iflags);
Dick Kennedy372c1872020-06-30 14:50:00 -070020393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarta6298522012-06-12 13:54:11 -040020394 "2823 txq empty and txq_cnt is %d\n ",
James Smart0e9bb8d2013-03-01 16:35:12 -050020395 txq_cnt);
James Smarta6298522012-06-12 13:54:11 -040020396 break;
20397 }
James Smart895427b2017-02-12 13:52:30 -080020398 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
James Smart2a9bf3d2010-06-07 15:24:45 -040020399 if (!sglq) {
James Smart19ca7602010-11-20 23:11:55 -050020400 __lpfc_sli_ringtx_put(phba, pring, piocbq);
James Smart398d81c2013-05-31 17:04:19 -040020401 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040020402 break;
James Smart2a9bf3d2010-06-07 15:24:45 -040020403 }
James Smart0e9bb8d2013-03-01 16:35:12 -050020404 txq_cnt--;
James Smart2a9bf3d2010-06-07 15:24:45 -040020405
20406 /* The xri and iocb resources secured,
20407 * attempt to issue request
20408 */
James Smart6d368e52011-05-24 11:44:12 -040020409 piocbq->sli4_lxritag = sglq->sli4_lxritag;
James Smart2a9bf3d2010-06-07 15:24:45 -040020410 piocbq->sli4_xritag = sglq->sli4_xritag;
20411 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
20412 fail_msg = "to convert bpl to sgl";
James Smart205e8242018-03-05 12:04:03 -080020413 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040020414 fail_msg = "to convert iocb to wqe";
James Smartdc19e3b2018-05-24 21:08:57 -070020415 else if (lpfc_sli4_wq_put(wq, &wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040020416 fail_msg = " - Wq is full";
20417 else
20418 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20419
20420 if (fail_msg) {
20421 /* Failed means we can't issue and need to cancel */
Dick Kennedy372c1872020-06-30 14:50:00 -070020422 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2a9bf3d2010-06-07 15:24:45 -040020423 "2822 IOCB failed %s iotag 0x%x "
20424 "xri 0x%x\n",
20425 fail_msg,
20426 piocbq->iotag, piocbq->sli4_xritag);
20427 list_add_tail(&piocbq->list, &completions);
20428 }
James Smart398d81c2013-05-31 17:04:19 -040020429 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040020430 }
20431
James Smart2a9bf3d2010-06-07 15:24:45 -040020432 /* Cancel all the IOCBs that cannot be issued */
20433 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20434 IOERR_SLI_ABORTED);
20435
James Smart0e9bb8d2013-03-01 16:35:12 -050020436 return txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040020437}
James Smart895427b2017-02-12 13:52:30 -080020438
20439/**
20440 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20441 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +010020442 * @pwqeq: Pointer to command WQE.
James Smart895427b2017-02-12 13:52:30 -080020443 * @sglq: Pointer to the scatter gather queue object.
20444 *
20445 * This routine converts the bpl or bde that is in the WQE
20446 * to a sgl list for the sli4 hardware. The physical address
20447 * of the bpl/bde is converted back to a virtual address.
20448 * If the WQE contains a BPL then the list of BDE's is
20449 * converted to sli4_sge's. If the WQE contains a single
20450 * BDE then it is converted to a single sli_sge.
20451 * The WQE is still in cpu endianness so the contents of
20452 * the bpl can be used without byte swapping.
20453 *
20454 * Returns valid XRI = Success, NO_XRI = Failure.
20455 */
20456static uint16_t
20457lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20458 struct lpfc_sglq *sglq)
20459{
20460 uint16_t xritag = NO_XRI;
20461 struct ulp_bde64 *bpl = NULL;
20462 struct ulp_bde64 bde;
20463 struct sli4_sge *sgl = NULL;
20464 struct lpfc_dmabuf *dmabuf;
James Smart205e8242018-03-05 12:04:03 -080020465 union lpfc_wqe128 *wqe;
James Smart895427b2017-02-12 13:52:30 -080020466 int numBdes = 0;
20467 int i = 0;
20468 uint32_t offset = 0; /* accumulated offset in the sg request list */
20469 int inbound = 0; /* number of sg reply entries inbound from firmware */
20470 uint32_t cmd;
20471
20472 if (!pwqeq || !sglq)
20473 return xritag;
20474
20475 sgl = (struct sli4_sge *)sglq->sgl;
20476 wqe = &pwqeq->wqe;
20477 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20478
20479 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20480 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20481 return sglq->sli4_xritag;
20482 numBdes = pwqeq->rsvd2;
20483 if (numBdes) {
20484 /* The addrHigh and addrLow fields within the WQE
20485 * have not been byteswapped yet so there is no
20486 * need to swap them back.
20487 */
20488 if (pwqeq->context3)
20489 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20490 else
20491 return xritag;
20492
20493 bpl = (struct ulp_bde64 *)dmabuf->virt;
20494 if (!bpl)
20495 return xritag;
20496
20497 for (i = 0; i < numBdes; i++) {
20498 /* Should already be byte swapped. */
20499 sgl->addr_hi = bpl->addrHigh;
20500 sgl->addr_lo = bpl->addrLow;
20501
20502 sgl->word2 = le32_to_cpu(sgl->word2);
20503 if ((i+1) == numBdes)
20504 bf_set(lpfc_sli4_sge_last, sgl, 1);
20505 else
20506 bf_set(lpfc_sli4_sge_last, sgl, 0);
20507 /* swap the size field back to the cpu so we
20508 * can assign it to the sgl.
20509 */
20510 bde.tus.w = le32_to_cpu(bpl->tus.w);
20511 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20512 /* The offsets in the sgl need to be accumulated
20513 * separately for the request and reply lists.
20514 * The request is always first, the reply follows.
20515 */
20516 switch (cmd) {
20517 case CMD_GEN_REQUEST64_WQE:
20518 /* add up the reply sg entries */
20519 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20520 inbound++;
20521 /* first inbound? reset the offset */
20522 if (inbound == 1)
20523 offset = 0;
20524 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20525 bf_set(lpfc_sli4_sge_type, sgl,
20526 LPFC_SGE_TYPE_DATA);
20527 offset += bde.tus.f.bdeSize;
20528 break;
20529 case CMD_FCP_TRSP64_WQE:
20530 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20531 bf_set(lpfc_sli4_sge_type, sgl,
20532 LPFC_SGE_TYPE_DATA);
20533 break;
20534 case CMD_FCP_TSEND64_WQE:
20535 case CMD_FCP_TRECEIVE64_WQE:
20536 bf_set(lpfc_sli4_sge_type, sgl,
20537 bpl->tus.f.bdeFlags);
20538 if (i < 3)
20539 offset = 0;
20540 else
20541 offset += bde.tus.f.bdeSize;
20542 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20543 break;
20544 }
20545 sgl->word2 = cpu_to_le32(sgl->word2);
20546 bpl++;
20547 sgl++;
20548 }
20549 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20550 /* The addrHigh and addrLow fields of the BDE have not
20551 * been byteswapped yet so they need to be swapped
20552 * before putting them in the sgl.
20553 */
20554 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20555 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20556 sgl->word2 = le32_to_cpu(sgl->word2);
20557 bf_set(lpfc_sli4_sge_last, sgl, 1);
20558 sgl->word2 = cpu_to_le32(sgl->word2);
20559 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20560 }
20561 return sglq->sli4_xritag;
20562}
20563
20564/**
20565 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20566 * @phba: Pointer to HBA context object.
Lee Jones7af29d42020-07-21 17:41:31 +010020567 * @qp: Pointer to HDW queue.
James Smart895427b2017-02-12 13:52:30 -080020568 * @pwqe: Pointer to command WQE.
20569 **/
20570int
James Smart1fbf9742019-01-28 11:14:26 -080020571lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
James Smart895427b2017-02-12 13:52:30 -080020572 struct lpfc_iocbq *pwqe)
20573{
James Smart205e8242018-03-05 12:04:03 -080020574 union lpfc_wqe128 *wqe = &pwqe->wqe;
James Smart7cacae22020-03-31 09:50:03 -070020575 struct lpfc_async_xchg_ctx *ctxp;
James Smart895427b2017-02-12 13:52:30 -080020576 struct lpfc_queue *wq;
20577 struct lpfc_sglq *sglq;
20578 struct lpfc_sli_ring *pring;
20579 unsigned long iflags;
Dick Kennedycd22d602017-08-23 16:55:35 -070020580 uint32_t ret = 0;
James Smart895427b2017-02-12 13:52:30 -080020581
20582 /* NVME_LS and NVME_LS ABTS requests. */
20583 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20584 pring = phba->sli4_hba.nvmels_wq->pring;
James Smart6a828b02019-01-28 11:14:31 -080020585 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20586 qp, wq_access);
James Smart895427b2017-02-12 13:52:30 -080020587 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20588 if (!sglq) {
20589 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20590 return WQE_BUSY;
20591 }
20592 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20593 pwqe->sli4_xritag = sglq->sli4_xritag;
20594 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20595 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20596 return WQE_ERROR;
20597 }
20598 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20599 pwqe->sli4_xritag);
Dick Kennedycd22d602017-08-23 16:55:35 -070020600 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20601 if (ret) {
James Smart895427b2017-02-12 13:52:30 -080020602 spin_unlock_irqrestore(&pring->ring_lock, iflags);
Dick Kennedycd22d602017-08-23 16:55:35 -070020603 return ret;
James Smart895427b2017-02-12 13:52:30 -080020604 }
Dick Kennedycd22d602017-08-23 16:55:35 -070020605
James Smart895427b2017-02-12 13:52:30 -080020606 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20607 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart93a4d6f2019-11-04 16:57:05 -080020608
20609 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
James Smart895427b2017-02-12 13:52:30 -080020610 return 0;
20611 }
20612
20613 /* NVME_FCREQ and NVME_ABTS requests */
James Smartda255e22020-11-15 11:26:42 -080020614 if (pwqe->iocb_flag & LPFC_IO_NVME ||
20615 pwqe->iocb_flag & LPFC_IO_FCP) {
James Smart895427b2017-02-12 13:52:30 -080020616 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
James Smartc00f62e2019-08-14 16:57:11 -070020617 wq = qp->io_wq;
James Smart1fbf9742019-01-28 11:14:26 -080020618 pring = wq->pring;
James Smart895427b2017-02-12 13:52:30 -080020619
James Smartc00f62e2019-08-14 16:57:11 -070020620 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
James Smart895427b2017-02-12 13:52:30 -080020621
James Smart6a828b02019-01-28 11:14:31 -080020622 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20623 qp, wq_access);
Dick Kennedycd22d602017-08-23 16:55:35 -070020624 ret = lpfc_sli4_wq_put(wq, wqe);
20625 if (ret) {
James Smart895427b2017-02-12 13:52:30 -080020626 spin_unlock_irqrestore(&pring->ring_lock, iflags);
Dick Kennedycd22d602017-08-23 16:55:35 -070020627 return ret;
James Smart895427b2017-02-12 13:52:30 -080020628 }
20629 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20630 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart93a4d6f2019-11-04 16:57:05 -080020631
20632 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
James Smart895427b2017-02-12 13:52:30 -080020633 return 0;
20634 }
20635
James Smartf358dd02017-02-12 13:52:34 -080020636 /* NVMET requests */
20637 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20638 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
James Smartc00f62e2019-08-14 16:57:11 -070020639 wq = qp->io_wq;
James Smart1fbf9742019-01-28 11:14:26 -080020640 pring = wq->pring;
James Smartf358dd02017-02-12 13:52:34 -080020641
James Smartf358dd02017-02-12 13:52:34 -080020642 ctxp = pwqe->context2;
James Smart6c621a22017-05-15 15:20:45 -070020643 sglq = ctxp->ctxbuf->sglq;
James Smartf358dd02017-02-12 13:52:34 -080020644 if (pwqe->sli4_xritag == NO_XRI) {
20645 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20646 pwqe->sli4_xritag = sglq->sli4_xritag;
20647 }
20648 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20649 pwqe->sli4_xritag);
James Smartc00f62e2019-08-14 16:57:11 -070020650 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
James Smart1fbf9742019-01-28 11:14:26 -080020651
James Smart6a828b02019-01-28 11:14:31 -080020652 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20653 qp, wq_access);
Dick Kennedycd22d602017-08-23 16:55:35 -070020654 ret = lpfc_sli4_wq_put(wq, wqe);
20655 if (ret) {
James Smartf358dd02017-02-12 13:52:34 -080020656 spin_unlock_irqrestore(&pring->ring_lock, iflags);
Dick Kennedycd22d602017-08-23 16:55:35 -070020657 return ret;
James Smartf358dd02017-02-12 13:52:34 -080020658 }
20659 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20660 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart93a4d6f2019-11-04 16:57:05 -080020661
20662 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
James Smartf358dd02017-02-12 13:52:34 -080020663 return 0;
20664 }
James Smart895427b2017-02-12 13:52:30 -080020665 return WQE_ERROR;
20666}
James Smartc4908502019-01-28 11:14:28 -080020667
James Smartdb7531d2020-11-15 11:26:44 -080020668/**
20669 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
20670 * @phba: Pointer to HBA context object.
20671 * @cmdiocb: Pointer to driver command iocb object.
20672 * @cmpl: completion function.
20673 *
20674 * Fill the appropriate fields for the abort WQE and call
20675 * internal routine lpfc_sli4_issue_wqe to send the WQE
20676 * This function is called with hbalock held and no ring_lock held.
20677 *
20678 * RETURNS 0 - SUCCESS
20679 **/
20680
20681int
20682lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
20683 void *cmpl)
20684{
20685 struct lpfc_vport *vport = cmdiocb->vport;
20686 struct lpfc_iocbq *abtsiocb = NULL;
20687 union lpfc_wqe128 *abtswqe;
20688 struct lpfc_io_buf *lpfc_cmd;
20689 int retval = IOCB_ERROR;
20690 u16 xritag = cmdiocb->sli4_xritag;
20691
20692 /*
20693 * The scsi command can not be in txq and it is in flight because the
20694 * pCmd is still pointing at the SCSI command we have to abort. There
20695 * is no need to search the txcmplq. Just send an abort to the FW.
20696 */
20697
20698 abtsiocb = __lpfc_sli_get_iocbq(phba);
20699 if (!abtsiocb)
20700 return WQE_NORESOURCE;
20701
20702 /* Indicate the IO is being aborted by the driver. */
20703 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
20704
20705 abtswqe = &abtsiocb->wqe;
20706 memset(abtswqe, 0, sizeof(*abtswqe));
20707
James Smart696770e2021-05-28 14:22:40 -070020708 if (!lpfc_is_link_up(phba))
James Smartdb7531d2020-11-15 11:26:44 -080020709 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
James Smartdb7531d2020-11-15 11:26:44 -080020710 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
20711 abtswqe->abort_cmd.rsrvd5 = 0;
20712 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
20713 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
20714 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
20715 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
20716 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
20717 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
20718 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
20719
20720 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
20721 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
20722 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
20723 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
20724 abtsiocb->iocb_flag |= LPFC_IO_FCP;
20725 if (cmdiocb->iocb_flag & LPFC_IO_NVME)
20726 abtsiocb->iocb_flag |= LPFC_IO_NVME;
20727 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
20728 abtsiocb->iocb_flag |= LPFC_IO_FOF;
20729 abtsiocb->vport = vport;
20730 abtsiocb->wqe_cmpl = cmpl;
20731
20732 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
20733 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
20734
20735 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
20736 "0359 Abort xri x%x, original iotag x%x, "
20737 "abort cmd iotag x%x retval x%x\n",
20738 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
20739
20740 if (retval) {
20741 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
20742 __lpfc_sli_release_iocbq(phba, abtsiocb);
20743 }
20744
20745 return retval;
20746}
20747
James Smartc4908502019-01-28 11:14:28 -080020748#ifdef LPFC_MXP_STAT
20749/**
20750 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20751 * @phba: pointer to lpfc hba data structure.
20752 * @hwqid: belong to which HWQ.
20753 *
20754 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20755 * 15 seconds after a test case is running.
20756 *
20757 * The user should call lpfc_debugfs_multixripools_write before running a test
20758 * case to clear stat_snapshot_taken. Then the user starts a test case. During
20759 * test case is running, stat_snapshot_taken is incremented by 1 every time when
20760 * this routine is called from heartbeat timer. When stat_snapshot_taken is
20761 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20762 **/
20763void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20764{
20765 struct lpfc_sli4_hdw_queue *qp;
20766 struct lpfc_multixri_pool *multixri_pool;
20767 struct lpfc_pvt_pool *pvt_pool;
20768 struct lpfc_pbl_pool *pbl_pool;
20769 u32 txcmplq_cnt;
20770
20771 qp = &phba->sli4_hba.hdwq[hwqid];
20772 multixri_pool = qp->p_multixri_pool;
20773 if (!multixri_pool)
20774 return;
20775
20776 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20777 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20778 pbl_pool = &qp->p_multixri_pool->pbl_pool;
James Smartc00f62e2019-08-14 16:57:11 -070020779 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
James Smartc4908502019-01-28 11:14:28 -080020780
20781 multixri_pool->stat_pbl_count = pbl_pool->count;
20782 multixri_pool->stat_pvt_count = pvt_pool->count;
20783 multixri_pool->stat_busy_count = txcmplq_cnt;
20784 }
20785
20786 multixri_pool->stat_snapshot_taken++;
20787}
20788#endif
20789
20790/**
20791 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20792 * @phba: pointer to lpfc hba data structure.
20793 * @hwqid: belong to which HWQ.
20794 *
20795 * This routine moves some XRIs from private to public pool when private pool
20796 * is not busy.
20797 **/
20798void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20799{
20800 struct lpfc_multixri_pool *multixri_pool;
20801 u32 io_req_count;
20802 u32 prev_io_req_count;
20803
20804 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20805 if (!multixri_pool)
20806 return;
20807 io_req_count = multixri_pool->io_req_count;
20808 prev_io_req_count = multixri_pool->prev_io_req_count;
20809
20810 if (prev_io_req_count != io_req_count) {
20811 /* Private pool is busy */
20812 multixri_pool->prev_io_req_count = io_req_count;
20813 } else {
20814 /* Private pool is not busy.
20815 * Move XRIs from private to public pool.
20816 */
20817 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20818 }
20819}
20820
20821/**
20822 * lpfc_adjust_high_watermark - Adjust high watermark
20823 * @phba: pointer to lpfc hba data structure.
20824 * @hwqid: belong to which HWQ.
20825 *
20826 * This routine sets high watermark as number of outstanding XRIs,
20827 * but make sure the new value is between xri_limit/2 and xri_limit.
20828 **/
20829void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20830{
20831 u32 new_watermark;
20832 u32 watermark_max;
20833 u32 watermark_min;
20834 u32 xri_limit;
20835 u32 txcmplq_cnt;
20836 u32 abts_io_bufs;
20837 struct lpfc_multixri_pool *multixri_pool;
20838 struct lpfc_sli4_hdw_queue *qp;
20839
20840 qp = &phba->sli4_hba.hdwq[hwqid];
20841 multixri_pool = qp->p_multixri_pool;
20842 if (!multixri_pool)
20843 return;
20844 xri_limit = multixri_pool->xri_limit;
20845
20846 watermark_max = xri_limit;
20847 watermark_min = xri_limit / 2;
20848
James Smartc00f62e2019-08-14 16:57:11 -070020849 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
James Smartc4908502019-01-28 11:14:28 -080020850 abts_io_bufs = qp->abts_scsi_io_bufs;
James Smartc00f62e2019-08-14 16:57:11 -070020851 abts_io_bufs += qp->abts_nvme_io_bufs;
James Smartc4908502019-01-28 11:14:28 -080020852
20853 new_watermark = txcmplq_cnt + abts_io_bufs;
20854 new_watermark = min(watermark_max, new_watermark);
20855 new_watermark = max(watermark_min, new_watermark);
20856 multixri_pool->pvt_pool.high_watermark = new_watermark;
20857
20858#ifdef LPFC_MXP_STAT
20859 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20860 new_watermark);
20861#endif
20862}
20863
20864/**
20865 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20866 * @phba: pointer to lpfc hba data structure.
20867 * @hwqid: belong to which HWQ.
20868 *
20869 * This routine is called from hearbeat timer when pvt_pool is idle.
20870 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
20871 * The first step moves (all - low_watermark) amount of XRIs.
20872 * The second step moves the rest of XRIs.
20873 **/
20874void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20875{
20876 struct lpfc_pbl_pool *pbl_pool;
20877 struct lpfc_pvt_pool *pvt_pool;
James Smart6a828b02019-01-28 11:14:31 -080020878 struct lpfc_sli4_hdw_queue *qp;
James Smartc4908502019-01-28 11:14:28 -080020879 struct lpfc_io_buf *lpfc_ncmd;
20880 struct lpfc_io_buf *lpfc_ncmd_next;
20881 unsigned long iflag;
20882 struct list_head tmp_list;
20883 u32 tmp_count;
20884
James Smart6a828b02019-01-28 11:14:31 -080020885 qp = &phba->sli4_hba.hdwq[hwqid];
20886 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20887 pvt_pool = &qp->p_multixri_pool->pvt_pool;
James Smartc4908502019-01-28 11:14:28 -080020888 tmp_count = 0;
20889
James Smart6a828b02019-01-28 11:14:31 -080020890 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20891 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
James Smartc4908502019-01-28 11:14:28 -080020892
20893 if (pvt_pool->count > pvt_pool->low_watermark) {
20894 /* Step 1: move (all - low_watermark) from pvt_pool
20895 * to pbl_pool
20896 */
20897
20898 /* Move low watermark of bufs from pvt_pool to tmp_list */
20899 INIT_LIST_HEAD(&tmp_list);
20900 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20901 &pvt_pool->list, list) {
20902 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20903 tmp_count++;
20904 if (tmp_count >= pvt_pool->low_watermark)
20905 break;
20906 }
20907
20908 /* Move all bufs from pvt_pool to pbl_pool */
20909 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20910
20911 /* Move all bufs from tmp_list to pvt_pool */
20912 list_splice(&tmp_list, &pvt_pool->list);
20913
20914 pbl_pool->count += (pvt_pool->count - tmp_count);
20915 pvt_pool->count = tmp_count;
20916 } else {
20917 /* Step 2: move the rest from pvt_pool to pbl_pool */
20918 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20919 pbl_pool->count += pvt_pool->count;
20920 pvt_pool->count = 0;
20921 }
20922
20923 spin_unlock(&pvt_pool->lock);
20924 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20925}
20926
20927/**
20928 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20929 * @phba: pointer to lpfc hba data structure
Lee Jones7af29d42020-07-21 17:41:31 +010020930 * @qp: pointer to HDW queue
James Smartc4908502019-01-28 11:14:28 -080020931 * @pbl_pool: specified public free XRI pool
20932 * @pvt_pool: specified private free XRI pool
20933 * @count: number of XRIs to move
20934 *
20935 * This routine tries to move some free common bufs from the specified pbl_pool
20936 * to the specified pvt_pool. It might move less than count XRIs if there's not
20937 * enough in public pool.
20938 *
20939 * Return:
20940 * true - if XRIs are successfully moved from the specified pbl_pool to the
20941 * specified pvt_pool
20942 * false - if the specified pbl_pool is empty or locked by someone else
20943 **/
20944static bool
James Smart6a828b02019-01-28 11:14:31 -080020945_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20946 struct lpfc_pbl_pool *pbl_pool,
James Smartc4908502019-01-28 11:14:28 -080020947 struct lpfc_pvt_pool *pvt_pool, u32 count)
20948{
20949 struct lpfc_io_buf *lpfc_ncmd;
20950 struct lpfc_io_buf *lpfc_ncmd_next;
20951 unsigned long iflag;
20952 int ret;
20953
20954 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20955 if (ret) {
20956 if (pbl_pool->count) {
20957 /* Move a batch of XRIs from public to private pool */
James Smart6a828b02019-01-28 11:14:31 -080020958 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
James Smartc4908502019-01-28 11:14:28 -080020959 list_for_each_entry_safe(lpfc_ncmd,
20960 lpfc_ncmd_next,
20961 &pbl_pool->list,
20962 list) {
20963 list_move_tail(&lpfc_ncmd->list,
20964 &pvt_pool->list);
20965 pvt_pool->count++;
20966 pbl_pool->count--;
20967 count--;
20968 if (count == 0)
20969 break;
20970 }
20971
20972 spin_unlock(&pvt_pool->lock);
20973 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20974 return true;
20975 }
20976 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20977 }
20978
20979 return false;
20980}
20981
20982/**
20983 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20984 * @phba: pointer to lpfc hba data structure.
20985 * @hwqid: belong to which HWQ.
20986 * @count: number of XRIs to move
20987 *
20988 * This routine tries to find some free common bufs in one of public pools with
20989 * Round Robin method. The search always starts from local hwqid, then the next
20990 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
20991 * a batch of free common bufs are moved to private pool on hwqid.
20992 * It might move less than count XRIs if there's not enough in public pool.
20993 **/
20994void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20995{
20996 struct lpfc_multixri_pool *multixri_pool;
20997 struct lpfc_multixri_pool *next_multixri_pool;
20998 struct lpfc_pvt_pool *pvt_pool;
20999 struct lpfc_pbl_pool *pbl_pool;
James Smart6a828b02019-01-28 11:14:31 -080021000 struct lpfc_sli4_hdw_queue *qp;
James Smartc4908502019-01-28 11:14:28 -080021001 u32 next_hwqid;
21002 u32 hwq_count;
21003 int ret;
21004
James Smart6a828b02019-01-28 11:14:31 -080021005 qp = &phba->sli4_hba.hdwq[hwqid];
21006 multixri_pool = qp->p_multixri_pool;
James Smartc4908502019-01-28 11:14:28 -080021007 pvt_pool = &multixri_pool->pvt_pool;
21008 pbl_pool = &multixri_pool->pbl_pool;
21009
21010 /* Check if local pbl_pool is available */
James Smart6a828b02019-01-28 11:14:31 -080021011 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
James Smartc4908502019-01-28 11:14:28 -080021012 if (ret) {
21013#ifdef LPFC_MXP_STAT
21014 multixri_pool->local_pbl_hit_count++;
21015#endif
21016 return;
21017 }
21018
21019 hwq_count = phba->cfg_hdw_queue;
21020
21021 /* Get the next hwqid which was found last time */
21022 next_hwqid = multixri_pool->rrb_next_hwqid;
21023
21024 do {
21025 /* Go to next hwq */
21026 next_hwqid = (next_hwqid + 1) % hwq_count;
21027
21028 next_multixri_pool =
21029 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21030 pbl_pool = &next_multixri_pool->pbl_pool;
21031
21032 /* Check if the public free xri pool is available */
21033 ret = _lpfc_move_xri_pbl_to_pvt(
James Smart6a828b02019-01-28 11:14:31 -080021034 phba, qp, pbl_pool, pvt_pool, count);
James Smartc4908502019-01-28 11:14:28 -080021035
21036 /* Exit while-loop if success or all hwqid are checked */
21037 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21038
21039 /* Starting point for the next time */
21040 multixri_pool->rrb_next_hwqid = next_hwqid;
21041
21042 if (!ret) {
21043 /* stats: all public pools are empty*/
21044 multixri_pool->pbl_empty_count++;
21045 }
21046
21047#ifdef LPFC_MXP_STAT
21048 if (ret) {
21049 if (next_hwqid == hwqid)
21050 multixri_pool->local_pbl_hit_count++;
21051 else
21052 multixri_pool->other_pbl_hit_count++;
21053 }
21054#endif
21055}
21056
21057/**
21058 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21059 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010021060 * @hwqid: belong to which HWQ.
James Smartc4908502019-01-28 11:14:28 -080021061 *
21062 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than
21063 * low watermark.
21064 **/
21065void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21066{
21067 struct lpfc_multixri_pool *multixri_pool;
21068 struct lpfc_pvt_pool *pvt_pool;
21069
21070 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21071 pvt_pool = &multixri_pool->pvt_pool;
21072
21073 if (pvt_pool->count < pvt_pool->low_watermark)
21074 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21075}
21076
/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: pointer to the HWQ this IO buf belongs to.
 *
 * This routine returns one IO buf back to free pool. If this is an urgent IO,
 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	/* Give back any extra SGL / CMD-RSP chunks borrowed from the hdwq
	 * pools before the buffer itself is returned.
	 */
	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		/* XRIs "owned" by this HWQ = sitting in pvt_pool + still on
		 * the txcmplq + held for SCSI/NVME abort processing.
		 */
		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 * based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		/* No rebalancing: buffer simply goes back on the put list */
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
21178
21179/**
21180 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21181 * @phba: pointer to lpfc hba data structure.
Lee Jones7af29d42020-07-21 17:41:31 +010021182 * @qp: pointer to HDW queue
James Smartc4908502019-01-28 11:14:28 -080021183 * @pvt_pool: pointer to private pool data structure.
21184 * @ndlp: pointer to lpfc nodelist data structure.
21185 *
21186 * This routine tries to get one free IO buf from private pool.
21187 *
21188 * Return:
21189 * pointer to one free IO buf - if private pool is not empty
21190 * NULL - if private pool is empty
21191 **/
21192static struct lpfc_io_buf *
21193lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
James Smart6a828b02019-01-28 11:14:31 -080021194 struct lpfc_sli4_hdw_queue *qp,
James Smartc4908502019-01-28 11:14:28 -080021195 struct lpfc_pvt_pool *pvt_pool,
21196 struct lpfc_nodelist *ndlp)
21197{
21198 struct lpfc_io_buf *lpfc_ncmd;
21199 struct lpfc_io_buf *lpfc_ncmd_next;
21200 unsigned long iflag;
21201
James Smart6a828b02019-01-28 11:14:31 -080021202 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
James Smartc4908502019-01-28 11:14:28 -080021203 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21204 &pvt_pool->list, list) {
21205 if (lpfc_test_rrq_active(
21206 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21207 continue;
21208 list_del(&lpfc_ncmd->list);
21209 pvt_pool->count--;
21210 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21211 return lpfc_ncmd;
21212 }
21213 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21214
21215 return NULL;
21216}
21217
21218/**
21219 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21220 * @phba: pointer to lpfc hba data structure.
21221 *
21222 * This routine tries to get one free IO buf from expedite pool.
21223 *
21224 * Return:
21225 * pointer to one free IO buf - if expedite pool is not empty
21226 * NULL - if expedite pool is empty
21227 **/
21228static struct lpfc_io_buf *
21229lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21230{
21231 struct lpfc_io_buf *lpfc_ncmd;
21232 struct lpfc_io_buf *lpfc_ncmd_next;
21233 unsigned long iflag;
21234 struct lpfc_epd_pool *epd_pool;
21235
21236 epd_pool = &phba->epd_pool;
21237 lpfc_ncmd = NULL;
21238
21239 spin_lock_irqsave(&epd_pool->lock, iflag);
21240 if (epd_pool->count > 0) {
21241 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21242 &epd_pool->list, list) {
21243 list_del(&lpfc_ncmd->list);
21244 epd_pool->count--;
21245 break;
21246 }
21247 }
21248 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21249
21250 return lpfc_ncmd;
21251}
21252
21253/**
21254 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
21255 * @phba: pointer to lpfc hba data structure.
21256 * @ndlp: pointer to lpfc nodelist data structure.
21257 * @hwqid: belong to which HWQ
21258 * @expedite: 1 means this request is urgent.
21259 *
21260 * This routine will do the following actions and then return a pointer to
21261 * one free IO buf.
21262 *
21263 * 1. If private free xri count is empty, move some XRIs from public to
21264 * private pool.
21265 * 2. Get one XRI from private free xri pool.
21266 * 3. If we fail to get one from pvt_pool and this is an expedite request,
21267 * get one free xri from expedite pool.
21268 *
21269 * Note: ndlp is only used on SCSI side for RRQ testing.
21270 * The caller should pass NULL for ndlp on NVME side.
21271 *
21272 * Return:
21273 * pointer to one free IO buf - if private pool is not empty
21274 * NULL - if private pool is empty
21275 **/
21276static struct lpfc_io_buf *
21277lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21278 struct lpfc_nodelist *ndlp,
21279 int hwqid, int expedite)
21280{
21281 struct lpfc_sli4_hdw_queue *qp;
21282 struct lpfc_multixri_pool *multixri_pool;
21283 struct lpfc_pvt_pool *pvt_pool;
21284 struct lpfc_io_buf *lpfc_ncmd;
21285
21286 qp = &phba->sli4_hba.hdwq[hwqid];
21287 lpfc_ncmd = NULL;
21288 multixri_pool = qp->p_multixri_pool;
21289 pvt_pool = &multixri_pool->pvt_pool;
21290 multixri_pool->io_req_count++;
21291
21292 /* If pvt_pool is empty, move some XRIs from public to private pool */
21293 if (pvt_pool->count == 0)
21294 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21295
21296 /* Get one XRI from private free xri pool */
James Smart6a828b02019-01-28 11:14:31 -080021297 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
James Smartc4908502019-01-28 11:14:28 -080021298
21299 if (lpfc_ncmd) {
21300 lpfc_ncmd->hdwq = qp;
21301 lpfc_ncmd->hdwq_no = hwqid;
21302 } else if (expedite) {
21303 /* If we fail to get one from pvt_pool and this is an expedite
21304 * request, get one free xri from expedite pool.
21305 */
21306 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21307 }
21308
21309 return lpfc_ncmd;
21310}
21311
21312static inline struct lpfc_io_buf *
21313lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21314{
21315 struct lpfc_sli4_hdw_queue *qp;
21316 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21317
21318 qp = &phba->sli4_hba.hdwq[idx];
21319 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21320 &qp->lpfc_io_buf_list_get, list) {
21321 if (lpfc_test_rrq_active(phba, ndlp,
21322 lpfc_cmd->cur_iocbq.sli4_lxritag))
21323 continue;
21324
21325 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21326 continue;
21327
21328 list_del_init(&lpfc_cmd->list);
21329 qp->get_io_bufs--;
21330 lpfc_cmd->hdwq = qp;
21331 lpfc_cmd->hdwq_no = idx;
21332 return lpfc_cmd;
21333 }
21334 return NULL;
21335}
21336
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes
 * a IO buffer from head of @hdwq io_buf_list and returns to caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		/* Keep the last LPFC_NVME_EXPEDITE_XRICNT buffers in reserve
		 * for expedite requests; ordinary requests may not use them.
		 */
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			/* Get list empty (or all entries skipped): refill it
			 * by splicing over the entire put list, then retry
			 * once. The put lock nests inside the get lock.
			 */
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
James Smartd79c9e92019-08-14 16:57:09 -070021392
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * The chunk is moved onto @lpfc_buf->dma_sgl_xtra_list; ownership returns
 * to the pool via lpfc_put_sgl_per_hdwq().
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more; drop the lock while allocating (GFP_ATOMIC,
		 * so no sleeping, but no need to hold it either). Failure
		 * paths below return with the lock NOT held.
		 */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		/* Allocate on the NUMA node of the CPU serving this WQ */
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		/* Re-take the lock to attach the fresh chunk to the IO buf */
		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	/* Either branch left the chunk at the tail of dma_sgl_xtra_list */
	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
21461
21462/**
21463 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
21464 * @phba: The HBA for which this call is being executed.
21465 * @lpfc_buf: IO buf structure with the SGL chunk
21466 *
21467 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
21468 *
21469 * Return codes:
21470 * 0 - Success
21471 * -EINVAL - Error
21472 **/
21473int
21474lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21475{
21476 int rc = 0;
21477 struct sli4_hybrid_sgl *list_entry = NULL;
21478 struct sli4_hybrid_sgl *tmp = NULL;
21479 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21480 struct list_head *buf_list = &hdwq->sgl_list;
James Smarta4c21ac2019-09-21 20:59:01 -070021481 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070021482
James Smarta4c21ac2019-09-21 20:59:01 -070021483 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021484
21485 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21486 list_for_each_entry_safe(list_entry, tmp,
21487 &lpfc_buf->dma_sgl_xtra_list,
21488 list_node) {
21489 list_move_tail(&list_entry->list_node,
21490 buf_list);
21491 }
21492 } else {
21493 rc = -EINVAL;
21494 }
21495
James Smarta4c21ac2019-09-21 20:59:01 -070021496 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021497 return rc;
21498}
21499
21500/**
21501 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
21502 * @phba: phba object
21503 * @hdwq: hdwq to cleanup sgl buff resources on
21504 *
21505 * This routine frees all SGL chunks of hdwq SGL chunk pool.
21506 *
21507 * Return codes:
21508 * None
21509 **/
21510void
21511lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
21512 struct lpfc_sli4_hdw_queue *hdwq)
21513{
21514 struct list_head *buf_list = &hdwq->sgl_list;
21515 struct sli4_hybrid_sgl *list_entry = NULL;
21516 struct sli4_hybrid_sgl *tmp = NULL;
James Smarta4c21ac2019-09-21 20:59:01 -070021517 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070021518
James Smarta4c21ac2019-09-21 20:59:01 -070021519 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021520
21521 /* Free sgl pool */
21522 list_for_each_entry_safe(list_entry, tmp,
21523 buf_list, list_node) {
21524 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
21525 list_entry->dma_sgl,
21526 list_entry->dma_phys_sgl);
21527 list_del(&list_entry->list_node);
21528 kfree(list_entry);
21529 }
21530
James Smarta4c21ac2019-09-21 20:59:01 -070021531 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021532}
21533
/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate an CMD/RSP buffer if the pool is empty.
 *
 * The buffer is moved onto @lpfc_buf->dma_cmd_rsp_list; ownership returns
 * to the pool via lpfc_put_cmd_rsp_buf_per_hdwq().
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more; drop the lock while allocating. Failure
		 * paths below return with the lock NOT held.
		 */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		/* Allocate on the NUMA node of the CPU serving this WQ */
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
						GFP_ATOMIC,
						&tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		/* The RSP area sits immediately after the CMD area in the
		 * same DMA allocation.
		 */
		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd));

		/* Re-take the lock to attach the fresh buffer to the IO buf */
		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	/* Either branch left the buffer at the tail of dma_cmd_rsp_list */
	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}
21609
21610/**
21611 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
21612 * @phba: The HBA for which this call is being executed.
21613 * @lpfc_buf: IO buf structure with the CMD/RSP buf
21614 *
21615 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
21616 *
21617 * Return codes:
21618 * 0 - Success
21619 * -EINVAL - Error
21620 **/
21621int
21622lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21623 struct lpfc_io_buf *lpfc_buf)
21624{
21625 int rc = 0;
21626 struct fcp_cmd_rsp_buf *list_entry = NULL;
21627 struct fcp_cmd_rsp_buf *tmp = NULL;
21628 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21629 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
James Smarta4c21ac2019-09-21 20:59:01 -070021630 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070021631
James Smarta4c21ac2019-09-21 20:59:01 -070021632 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021633
21634 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
21635 list_for_each_entry_safe(list_entry, tmp,
21636 &lpfc_buf->dma_cmd_rsp_list,
21637 list_node) {
21638 list_move_tail(&list_entry->list_node,
21639 buf_list);
21640 }
21641 } else {
21642 rc = -EINVAL;
21643 }
21644
James Smarta4c21ac2019-09-21 20:59:01 -070021645 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021646 return rc;
21647}
21648
21649/**
21650 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
21651 * @phba: phba object
21652 * @hdwq: hdwq to cleanup cmd rsp buff resources on
21653 *
21654 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
21655 *
21656 * Return codes:
21657 * None
21658 **/
21659void
21660lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21661 struct lpfc_sli4_hdw_queue *hdwq)
21662{
21663 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21664 struct fcp_cmd_rsp_buf *list_entry = NULL;
21665 struct fcp_cmd_rsp_buf *tmp = NULL;
James Smarta4c21ac2019-09-21 20:59:01 -070021666 unsigned long iflags;
James Smartd79c9e92019-08-14 16:57:09 -070021667
James Smarta4c21ac2019-09-21 20:59:01 -070021668 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021669
21670 /* Free cmd_rsp buf pool */
21671 list_for_each_entry_safe(list_entry, tmp,
21672 buf_list,
21673 list_node) {
21674 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21675 list_entry->fcp_cmnd,
21676 list_entry->fcp_cmd_rsp_dma_handle);
21677 list_del(&list_entry->list_node);
21678 kfree(list_entry);
21679 }
21680
James Smarta4c21ac2019-09-21 20:59:01 -070021681 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
James Smartd79c9e92019-08-14 16:57:09 -070021682}