/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
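		/* One-time setup: convert the license key string to
		 * big-endian 32-bit words before it is copied into the
		 * READ_NVPARM mailbox below.
		 */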
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
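	/* Pull the VPD region from adapter memory in successive DUMP
	 * mailbox chunks until the adapter reports no more data or the
	 * DMP_VPD_SIZE buffer is full, then parse the result below.
	 */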
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
		memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

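	/* Adopt the port name from the service parameters when it is not
	 * yet set, a soft WWPN is configured, or the vendor FAPWWN key is
	 * present in the service parameters; FAWWPN_SET tracks the key case.
	 */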
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
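	/* Each of the 6 IEEE address bytes yields two serial-number
	 * characters: nibble values 0-9 become '0'-'9' (0x30 + n) and
	 * 10-15 become 'a'-'f' (0x61 + n - 10).
	 */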
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

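	/* Reject a user link speed that the adapter's link media type
	 * (lmt) bits do not advertise; it is reset to auto-negotiation
	 * below.
	 */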
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
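	/* SLI4: walk every work queue, clear LPFC_IO_ON_TXCMPLQ on each
	 * outstanding iocb, collect them for cancellation, and abort
	 * each ring.
	 */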
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
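	/* Move aborted I/O buffers on every hardware queue back onto the
	 * put list and zero the SCSI/NVMe abort counters; cnt returns the
	 * number of buffers recovered.
	 */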
James Smarta7892412021-04-11 18:31:15 -07001065 spin_lock_irq(&phba->hbalock);
James Smart5e5b5112019-01-28 11:14:22 -08001066 cnt = 0;
1067 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1068 qp = &phba->sli4_hba.hdwq[idx];
1069
James Smartc00f62e2019-08-14 16:57:11 -07001070 spin_lock(&qp->abts_io_buf_list_lock);
1071 list_splice_init(&qp->lpfc_abts_io_buf_list,
James Smart895427b2017-02-12 13:52:30 -08001072 &aborts);
James Smart895427b2017-02-12 13:52:30 -08001073
James Smart0794d602019-01-28 11:14:19 -08001074 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
James Smart86c67372017-04-21 16:05:04 -07001075 psb->pCmd = NULL;
1076 psb->status = IOSTAT_SUCCESS;
James Smartcf1a1d32017-12-08 17:18:03 -08001077 cnt++;
James Smart86c67372017-04-21 16:05:04 -07001078 }
James Smart5e5b5112019-01-28 11:14:22 -08001079 spin_lock(&qp->io_buf_list_put_lock);
1080 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1081 qp->put_io_bufs += qp->abts_scsi_io_bufs;
James Smartc00f62e2019-08-14 16:57:11 -07001082 qp->put_io_bufs += qp->abts_nvme_io_bufs;
James Smart5e5b5112019-01-28 11:14:22 -08001083 qp->abts_scsi_io_bufs = 0;
James Smartc00f62e2019-08-14 16:57:11 -07001084 qp->abts_nvme_io_bufs = 0;
James Smart5e5b5112019-01-28 11:14:22 -08001085 spin_unlock(&qp->io_buf_list_put_lock);
James Smartc00f62e2019-08-14 16:57:11 -07001086 spin_unlock(&qp->abts_io_buf_list_lock);
James Smart5e5b5112019-01-28 11:14:22 -08001087 }
James Smart731eedc2019-03-12 16:30:12 -07001088 spin_unlock_irq(&phba->hbalock);
James Smart5e5b5112019-01-28 11:14:22 -08001089
1090 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
James Smart731eedc2019-03-12 16:30:12 -07001091 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
James Smart5e5b5112019-01-28 11:14:22 -08001092 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1093 &nvmet_aborts);
James Smart731eedc2019-03-12 16:30:12 -07001094 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
James Smart86c67372017-04-21 16:05:04 -07001095 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
James Smart7b7f5512020-03-31 09:50:04 -07001096 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
James Smart6c621a22017-05-15 15:20:45 -07001097 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
James Smart86c67372017-04-21 16:05:04 -07001098 }
James Smart895427b2017-02-12 13:52:30 -08001099 }
James Smart895427b2017-02-12 13:52:30 -08001100
James Smart68e814f2014-05-21 08:04:59 -04001101 lpfc_sli4_free_sp_events(phba);
James Smart5e5b5112019-01-28 11:14:22 -08001102 return cnt;
James Smartda0436e2009-05-22 14:51:39 -04001103}
1104
1105/**
1106 * lpfc_hba_down_post - Wrapper func for hba down post routine
1107 * @phba: pointer to lpfc HBA data structure.
1108 *
1109 * This routine wraps the actual SLI3 or SLI4 routine for performing
1110 * uninitialization after the HBA is reset when bring down the SLI Layer.
1111 *
1112 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001113 * 0 - success.
James Smartda0436e2009-05-22 14:51:39 -04001114 * Any other value - error.
1115 **/
1116int
1117lpfc_hba_down_post(struct lpfc_hba *phba)
1118{
1119 return (*phba->lpfc_hba_down_post)(phba);
1120}
Jamie Wellnitz41415862006-02-28 19:25:27 -05001121
James Smarte59058c2008-08-24 21:49:00 -04001122/**
James Smart3621a712009-04-06 18:47:14 -04001123 * lpfc_hb_timeout - The HBA-timer timeout handler
Lee Jonesfe614ac2020-07-23 13:24:22 +01001124 * @t: timer context used to obtain the pointer to lpfc hba data structure.
James Smarte59058c2008-08-24 21:49:00 -04001125 *
1126 * This is the HBA-timer timeout handler registered to the lpfc driver. When
1127 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1128 * work-port-events bitmap and the worker thread is notified. This timeout
1129 * event will be used by the worker thread to invoke the actual timeout
1130 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
1131 * be performed in the timeout handler and the HBA timeout event bit shall
1132 * be cleared by the worker thread after it has taken the event bitmap out.
1133 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001134static void
Kees Cookf22eb4d2017-09-06 20:24:26 -07001135lpfc_hb_timeout(struct timer_list *t)
James Smart858c9f62007-06-17 19:56:39 -05001136{
1137 struct lpfc_hba *phba;
James Smart5e9d9b82008-06-14 22:52:53 -04001138 uint32_t tmo_posted;
James Smart858c9f62007-06-17 19:56:39 -05001139 unsigned long iflag;
1140
Kees Cookf22eb4d2017-09-06 20:24:26 -07001141 phba = from_timer(phba, t, hb_tmofunc);
James Smart93996272008-08-24 21:50:30 -04001142
1143 /* Check for heart beat timeout conditions */
James Smart858c9f62007-06-17 19:56:39 -05001144 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
James Smart5e9d9b82008-06-14 22:52:53 -04001145 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1146 if (!tmo_posted)
James Smart858c9f62007-06-17 19:56:39 -05001147 phba->pport->work_port_events |= WORKER_HB_TMO;
1148 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1149
James Smart93996272008-08-24 21:50:30 -04001150 /* Tell the worker thread there is work to do */
James Smart5e9d9b82008-06-14 22:52:53 -04001151 if (!tmo_posted)
1152 lpfc_worker_wake_up(phba);
James Smart858c9f62007-06-17 19:56:39 -05001153 return;
1154}
1155
James Smarte59058c2008-08-24 21:49:00 -04001156/**
James Smart19ca7602010-11-20 23:11:55 -05001157 * lpfc_rrq_timeout - The RRQ-timer timeout handler
Lee Jonesfe614ac2020-07-23 13:24:22 +01001158 * @t: timer context used to obtain the pointer to lpfc hba data structure.
James Smart19ca7602010-11-20 23:11:55 -05001159 *
1160 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1161 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
1162 * work-port-events bitmap and the worker thread is notified. This timeout
1163 * event will be used by the worker thread to invoke the actual timeout
1164 * handler routine, lpfc_rrq_handler. Any periodic operations will
1165 * be performed in the timeout handler and the RRQ timeout event bit shall
1166 * be cleared by the worker thread after it has consumed the event bitmap.
1167 **/
1168static void
Kees Cookf22eb4d2017-09-06 20:24:26 -07001169lpfc_rrq_timeout(struct timer_list *t)
James Smart19ca7602010-11-20 23:11:55 -05001170{
1171 struct lpfc_hba *phba;
James Smart19ca7602010-11-20 23:11:55 -05001172 unsigned long iflag;
1173
Kees Cookf22eb4d2017-09-06 20:24:26 -07001174 phba = from_timer(phba, t, rrq_tmr);
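	/* Mark RRQ processing as active for the worker thread unless the
	 * driver is unloading, in which case the flag is cleared instead.
	 */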
James Smart19ca7602010-11-20 23:11:55 -05001175 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
James Smart06918ac2014-02-20 09:57:57 -05001176 if (!(phba->pport->load_flag & FC_UNLOADING))
1177 phba->hba_flag |= HBA_RRQ_ACTIVE;
1178 else
1179 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
James Smart19ca7602010-11-20 23:11:55 -05001180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
James Smart06918ac2014-02-20 09:57:57 -05001181
1182 if (!(phba->pport->load_flag & FC_UNLOADING))
1183 lpfc_worker_wake_up(phba);
James Smart19ca7602010-11-20 23:11:55 -05001184}
1185
1186/**
James Smart3621a712009-04-06 18:47:14 -04001187 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
James Smarte59058c2008-08-24 21:49:00 -04001188 * @phba: pointer to lpfc hba data structure.
1189 * @pmboxq: pointer to the driver internal queue element for mailbox command.
1190 *
1191 * This is the callback function to the lpfc heart-beat mailbox command.
1192 * If configured, the lpfc driver issues the heart-beat mailbox command to
1193 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
1194 * heart-beat mailbox command is issued, the driver sets up the heart-beat
1195 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks the
1196 * heart-beat as outstanding. Once the mailbox command comes back and
1197 * no error conditions are detected, the heart-beat mailbox command timer is
1198 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1199 * state is cleared for the next heart-beat. If the timer expires with the
1200 * heart-beat outstanding state set, the driver will take the HBA offline.
1201 **/
James Smart858c9f62007-06-17 19:56:39 -05001202static void
1203lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1204{
1205 unsigned long drvr_flag;
1206
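	/* The heart-beat mailbox completed: clear the in-progress and
	 * forced-timeout flags under hbalock.
	 */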
1207 spin_lock_irqsave(&phba->hbalock, drvr_flag);
James Smarta22d73b2021-01-04 10:02:38 -08001208 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
James Smart858c9f62007-06-17 19:56:39 -05001209 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1210
James Smarta22d73b2021-01-04 10:02:38 -08001211 /* Check and reset heart-beat timer if necessary */
James Smart858c9f62007-06-17 19:56:39 -05001212 mempool_free(pmboxq, phba->mbox_mem_pool);
1213 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1214 !(phba->link_state == LPFC_HBA_ERROR) &&
James Smart51ef4c22007-08-02 11:10:31 -04001215 !(phba->pport->load_flag & FC_UNLOADING))
James Smart858c9f62007-06-17 19:56:39 -05001216 mod_timer(&phba->hb_tmofunc,
James Smart256ec0d2013-04-17 20:14:58 -04001217 jiffies +
1218 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
James Smart858c9f62007-06-17 19:56:39 -05001219 return;
1220}
1221
Lee Jonesfe614ac2020-07-23 13:24:22 +01001222/*
Dick Kennedy317aeb82020-06-30 14:49:59 -07001223 * lpfc_idle_stat_delay_work - idle_stat tracking
1224 *
1225 * This routine tracks per-cq idle_stat and uses it to choose each cq's
 * poll_mode (LPFC_QUEUE_WORK vs. LPFC_IRQ_POLL).
1226 *
1227 * Return codes:
1228 * None
1229 **/
1230static void
1231lpfc_idle_stat_delay_work(struct work_struct *work)
1232{
1233 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1234 struct lpfc_hba,
1235 idle_stat_delay_work);
1236 struct lpfc_queue *cq;
1237 struct lpfc_sli4_hdw_queue *hdwq;
1238 struct lpfc_idle_stat *idle_stat;
1239 u32 i, idle_percent;
1240 u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1241
1242 if (phba->pport->load_flag & FC_UNLOADING)
1243 return;
1244
1245 if (phba->link_state == LPFC_HBA_ERROR ||
1246 phba->pport->fc_flag & FC_OFFLINE_MODE)
1247 goto requeue;
1248
1249 for_each_present_cpu(i) {
1250 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1251 cq = hdwq->io_cq;
1252
1253 /* Skip if we've already handled this cq's primary CPU */
1254 if (cq->chann != i)
1255 continue;
1256
1257 idle_stat = &phba->sli4_hba.idle_stat[i];
1258
1259 /* get_cpu_idle_time returns values as running counters. Thus,
1260 * to know the amount for this period, the prior counter values
1261 * need to be subtracted from the current counter values.
1262 * From there, the idle time stat can be calculated as a
1263 * percentage of 100 - the sum of the other consumption times.
1264 */
1265 wall_idle = get_cpu_idle_time(i, &wall, 1);
1266 diff_idle = wall_idle - idle_stat->prev_idle;
1267 diff_wall = wall - idle_stat->prev_wall;
1268
1269 if (diff_wall <= diff_idle)
1270 busy_time = 0;
1271 else
1272 busy_time = diff_wall - diff_idle;
1273
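		/* Compute the busy percentage for the period, then invert it
		 * to get the idle percentage.
		 */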
1274 idle_percent = div64_u64(100 * busy_time, diff_wall);
1275 idle_percent = 100 - idle_percent;
1276
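		/* A mostly busy CPU (less than 15% idle) keeps cq work in
		 * LPFC_QUEUE_WORK mode; otherwise switch to LPFC_IRQ_POLL.
		 */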
1277 if (idle_percent < 15)
1278 cq->poll_mode = LPFC_QUEUE_WORK;
1279 else
1280 cq->poll_mode = LPFC_IRQ_POLL;
1281
1282 idle_stat->prev_idle = wall_idle;
1283 idle_stat->prev_wall = wall;
1284 }
1285
1286requeue:
1287 schedule_delayed_work(&phba->idle_stat_delay_work,
1288 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1289}
1290
James Smart32517fc2019-01-28 11:14:33 -08001291static void
1292lpfc_hb_eq_delay_work(struct work_struct *work)
1293{
1294 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1295 struct lpfc_hba, eq_delay_work);
1296 struct lpfc_eq_intr_info *eqi, *eqi_new;
1297 struct lpfc_queue *eq, *eq_next;
James Smart8156d372019-10-18 14:18:26 -07001298 unsigned char *ena_delay = NULL;
James Smart32517fc2019-01-28 11:14:33 -08001299 uint32_t usdelay;
1300 int i;
1301
1302 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1303 return;
1304
1305 if (phba->link_state == LPFC_HBA_ERROR ||
1306 phba->pport->fc_flag & FC_OFFLINE_MODE)
1307 goto requeue;
1308
James Smart8156d372019-10-18 14:18:26 -07001309 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1310 GFP_KERNEL);
1311 if (!ena_delay)
James Smart32517fc2019-01-28 11:14:33 -08001312 goto requeue;
1313
James Smart8156d372019-10-18 14:18:26 -07001314 for (i = 0; i < phba->cfg_irq_chann; i++) {
1315 /* Get the EQ corresponding to the IRQ vector */
1316 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1317 if (!eq)
1318 continue;
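		/* An EQ that already has a coalescing delay, or that was
		 * flagged for a delay check, enables delay calculation for
		 * the CPU it last ran on.
		 */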
1319 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1320 eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1321 ena_delay[eq->last_cpu] = 1;
James Smart8d34a592019-08-14 16:56:35 -07001322 }
James Smart8156d372019-10-18 14:18:26 -07001323 }
James Smart32517fc2019-01-28 11:14:33 -08001324
1325 for_each_present_cpu(i) {
James Smart32517fc2019-01-28 11:14:33 -08001326 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
James Smart8156d372019-10-18 14:18:26 -07001327 if (ena_delay[i]) {
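			/* Scale the delay with the interrupt count seen in the
			 * last period: one LPFC_EQ_DELAY_STEP per ~1024
			 * interrupts (icnt >> 10), capped below.
			 */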
1328 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1329 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1330 usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1331 } else {
1332 usdelay = 0;
James Smart8d34a592019-08-14 16:56:35 -07001333 }
James Smart32517fc2019-01-28 11:14:33 -08001334
James Smart32517fc2019-01-28 11:14:33 -08001335 eqi->icnt = 0;
1336
1337 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
James Smart8156d372019-10-18 14:18:26 -07001338 if (unlikely(eq->last_cpu != i)) {
James Smart32517fc2019-01-28 11:14:33 -08001339 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1340 eq->last_cpu);
1341 list_move_tail(&eq->cpu_list, &eqi_new->list);
1342 continue;
1343 }
1344 if (usdelay != eq->q_mode)
1345 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1346 usdelay);
1347 }
1348 }
1349
James Smart8156d372019-10-18 14:18:26 -07001350 kfree(ena_delay);
James Smart32517fc2019-01-28 11:14:33 -08001351
1352requeue:
1353 queue_delayed_work(phba->wq, &phba->eq_delay_work,
1354 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1355}
1356
James Smarte59058c2008-08-24 21:49:00 -04001357/**
James Smartc4908502019-01-28 11:14:28 -08001358 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1359 * @phba: pointer to lpfc hba data structure.
1360 *
1361 * For each heartbeat, this routine applies some heuristics to adjust
1362 * the XRI distribution. The goal is to fully utilize free XRIs.
1363 **/
1364static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1365{
1366 u32 i;
1367 u32 hwq_count;
1368
1369 hwq_count = phba->cfg_hdw_queue;
1370 for (i = 0; i < hwq_count; i++) {
1371 /* Adjust XRIs in private pool */
1372 lpfc_adjust_pvt_pool_count(phba, i);
1373
1374 /* Adjust high watermark */
1375 lpfc_adjust_high_watermark(phba, i);
1376
1377#ifdef LPFC_MXP_STAT
1378 /* Snapshot pbl, pvt and busy count */
1379 lpfc_snapshot_mxp(phba, i);
1380#endif
1381 }
1382}
1383
James Smarte59058c2008-08-24 21:49:00 -04001384/**
James Smarta22d73b2021-01-04 10:02:38 -08001385 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1386 * @phba: pointer to lpfc hba data structure.
1387 *
1388 * If an HB mbox is not already in progress, this routine will allocate
1389 * an LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1390 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1391 **/
1392int
1393lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1394{
1395 LPFC_MBOXQ_t *pmboxq;
1396 int retval;
1397
1398 /* Is a Heartbeat mbox already in progress */
1399 if (phba->hba_flag & HBA_HBEAT_INP)
1400 return 0;
1401
1402 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1403 if (!pmboxq)
1404 return -ENOMEM;
1405
1406 lpfc_heart_beat(phba, pmboxq);
1407 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1408 pmboxq->vport = phba->pport;
1409 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1410
1411 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1412 mempool_free(pmboxq, phba->mbox_mem_pool);
1413 return -ENXIO;
1414 }
1415 phba->hba_flag |= HBA_HBEAT_INP;
1416
1417 return 0;
1418}
1419
1420/**
1421 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1422 * @phba: pointer to lpfc hba data structure.
1423 *
1424 * When the heartbeat timer (every 5 sec) fires, the HBA_HBEAT_TMO flag
1425 * forces a MBX_HEARTBEAT mbox command to be issued, regardless of the
1426 * value of lpfc_enable_hba_heartbeat.
1427 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
1428 * try to issue a MBX_HEARTBEAT mbox command, so this routine skips
 * setting the flag.
1429 **/
1430void
1431lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1432{
1433 if (phba->cfg_enable_hba_heartbeat)
1434 return;
1435 phba->hba_flag |= HBA_HBEAT_TMO;
1436}
1437
1438/**
James Smart3621a712009-04-06 18:47:14 -04001439 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
James Smarte59058c2008-08-24 21:49:00 -04001440 * @phba: pointer to lpfc hba data structure.
1441 *
1442 * This is the actual HBA-timer timeout handler to be invoked by the worker
1443 * thread whenever the HBA timer fires and the HBA-timeout event is posted.
1444 * This handler performs any periodic operations needed for the device. If
1445 * such a periodic event has already been attended to either in the interrupt
1446 * handler or by processing slow-ring or fast-ring events within the HBA-timer
1447 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
1448 * the timer for the next timeout period. If the lpfc heart-beat mailbox
1449 * command is configured and there is no heart-beat mailbox command
1450 * outstanding, a heart-beat mailbox is issued and the timer is set properly.
1451 * Otherwise, if a heart-beat mailbox command has been outstanding, the HBA
1452 * is taken offline.
1453 **/
James Smart858c9f62007-06-17 19:56:39 -05001454void
1455lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1456{
James Smart45ed1192009-10-02 15:17:02 -04001457 struct lpfc_vport **vports;
James Smart0ff10d42008-01-11 01:52:36 -05001458 struct lpfc_dmabuf *buf_ptr;
James Smarta22d73b2021-01-04 10:02:38 -08001459 int retval = 0;
1460 int i, tmo;
James Smart858c9f62007-06-17 19:56:39 -05001461 struct lpfc_sli *psli = &phba->sli;
James Smart0ff10d42008-01-11 01:52:36 -05001462 LIST_HEAD(completions);
James Smart858c9f62007-06-17 19:56:39 -05001463
James Smartc4908502019-01-28 11:14:28 -08001464 if (phba->cfg_xri_rebalancing) {
1465 /* Multi-XRI pools handler */
1466 lpfc_hb_mxp_handler(phba);
1467 }
James Smart858c9f62007-06-17 19:56:39 -05001468
James Smart45ed1192009-10-02 15:17:02 -04001469 vports = lpfc_create_vport_work_array(phba);
1470 if (vports != NULL)
James Smart4258e982015-12-16 18:11:58 -05001471 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
James Smart45ed1192009-10-02 15:17:02 -04001472 lpfc_rcv_seq_check_edtov(vports[i]);
James Smarte3ba04c2019-12-18 15:58:02 -08001473 lpfc_fdmi_change_check(vports[i]);
James Smart4258e982015-12-16 18:11:58 -05001474 }
James Smart45ed1192009-10-02 15:17:02 -04001475 lpfc_destroy_vport_work_array(phba, vports);
1476
James Smart858c9f62007-06-17 19:56:39 -05001477 if ((phba->link_state == LPFC_HBA_ERROR) ||
James Smart51ef4c22007-08-02 11:10:31 -04001478 (phba->pport->load_flag & FC_UNLOADING) ||
James Smart858c9f62007-06-17 19:56:39 -05001479 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1480 return;
1481
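	/* If no ELS buffers were consumed since the last heartbeat interval,
	 * free the ones still queued on phba->elsbuf.
	 */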
James Smart0ff10d42008-01-11 01:52:36 -05001482 if (phba->elsbuf_cnt &&
1483 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1484 spin_lock_irq(&phba->hbalock);
1485 list_splice_init(&phba->elsbuf, &completions);
1486 phba->elsbuf_cnt = 0;
1487 phba->elsbuf_prev_cnt = 0;
1488 spin_unlock_irq(&phba->hbalock);
1489
1490 while (!list_empty(&completions)) {
1491 list_remove_head(&completions, buf_ptr,
1492 struct lpfc_dmabuf, list);
1493 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1494 kfree(buf_ptr);
1495 }
1496 }
1497 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1498
James Smart858c9f62007-06-17 19:56:39 -05001499 /* If there is no heart beat outstanding, issue a heartbeat command */
James Smart13815c82008-01-11 01:52:48 -05001500 if (phba->cfg_enable_hba_heartbeat) {
James Smarta22d73b2021-01-04 10:02:38 -08001501 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1502 spin_lock_irq(&phba->pport->work_port_lock);
1503 if (time_after(phba->last_completion_time +
1504 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1505 jiffies)) {
1506 spin_unlock_irq(&phba->pport->work_port_lock);
1507 if (phba->hba_flag & HBA_HBEAT_INP)
1508 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1509 else
1510 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1511 goto out;
1512 }
1513 spin_unlock_irq(&phba->pport->work_port_lock);
1514
1515 /* Check if a MBX_HEARTBEAT is already in progress */
1516 if (phba->hba_flag & HBA_HBEAT_INP) {
1517 /*
1518 * If heart beat timeout called with HBA_HBEAT_INP set
1519 * we need to give the hb mailbox cmd a chance to
1520 * complete or TMO.
1521 */
1522 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1523 "0459 Adapter heartbeat still outstanding: "
1524 "last compl time was %d ms.\n",
1525 jiffies_to_msecs(jiffies
1526 - phba->last_completion_time));
1527 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1528 } else {
James Smartbc739052010-08-04 16:11:18 -04001529 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1530 (list_empty(&psli->mboxq))) {
James Smart13815c82008-01-11 01:52:48 -05001531
James Smarta22d73b2021-01-04 10:02:38 -08001532 retval = lpfc_issue_hb_mbox(phba);
1533 if (retval) {
1534 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1535 goto out;
James Smartbc739052010-08-04 16:11:18 -04001536 }
1537 phba->skipped_hb = 0;
James Smartbc739052010-08-04 16:11:18 -04001538 } else if (time_before_eq(phba->last_completion_time,
1539 phba->skipped_hb)) {
1540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1541 "2857 Last completion time not "
1542 "updated in %d ms\n",
1543 jiffies_to_msecs(jiffies
1544 - phba->last_completion_time));
1545 } else
1546 phba->skipped_hb = jiffies;
1547
James Smarta22d73b2021-01-04 10:02:38 -08001548 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1549 goto out;
James Smart858c9f62007-06-17 19:56:39 -05001550 }
James Smart4258e982015-12-16 18:11:58 -05001551 } else {
James Smarta22d73b2021-01-04 10:02:38 -08001552 /* Check to see if we want to force a MBX_HEARTBEAT */
1553 if (phba->hba_flag & HBA_HBEAT_TMO) {
1554 retval = lpfc_issue_hb_mbox(phba);
1555 if (retval)
1556 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1557 else
1558 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1559 goto out;
1560 }
1561 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
James Smart858c9f62007-06-17 19:56:39 -05001562 }
James Smarta22d73b2021-01-04 10:02:38 -08001563out:
1564 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
James Smart858c9f62007-06-17 19:56:39 -05001565}
1566
James Smarte59058c2008-08-24 21:49:00 -04001567/**
James Smart3621a712009-04-06 18:47:14 -04001568 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
James Smarte59058c2008-08-24 21:49:00 -04001569 * @phba: pointer to lpfc hba data structure.
1570 *
1571 * This routine is called to bring the HBA offline when HBA hardware error
1572 * other than Port Error 6 has been detected.
1573 **/
James Smart09372822008-01-11 01:52:54 -05001574static void
1575lpfc_offline_eratt(struct lpfc_hba *phba)
1576{
1577 struct lpfc_sli *psli = &phba->sli;
1578
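	/* Mark the SLI layer inactive, take the port offline, reset the
	 * adapter and leave the HBA in the error state.
	 */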
1579 spin_lock_irq(&phba->hbalock);
James Smartf4b4c682009-05-22 14:53:12 -04001580 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart09372822008-01-11 01:52:54 -05001581 spin_unlock_irq(&phba->hbalock);
James Smart618a5232012-06-12 13:54:36 -04001582 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
James Smart09372822008-01-11 01:52:54 -05001583
1584 lpfc_offline(phba);
1585 lpfc_reset_barrier(phba);
James Smartf4b4c682009-05-22 14:53:12 -04001586 spin_lock_irq(&phba->hbalock);
James Smart09372822008-01-11 01:52:54 -05001587 lpfc_sli_brdreset(phba);
James Smartf4b4c682009-05-22 14:53:12 -04001588 spin_unlock_irq(&phba->hbalock);
James Smart09372822008-01-11 01:52:54 -05001589 lpfc_hba_down_post(phba);
1590 lpfc_sli_brdready(phba, HS_MBRDY);
1591 lpfc_unblock_mgmt_io(phba);
1592 phba->link_state = LPFC_HBA_ERROR;
1593 return;
1594}
1595
James Smarte59058c2008-08-24 21:49:00 -04001596/**
James Smartda0436e2009-05-22 14:51:39 -04001597 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1598 * @phba: pointer to lpfc hba data structure.
1599 *
1600 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1601 * other than Port Error 6 has been detected.
1602 **/
James Smarta88dbb62013-04-17 20:18:39 -04001603void
James Smartda0436e2009-05-22 14:51:39 -04001604lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1605{
James Smart946727d2015-04-07 15:07:09 -04001606 spin_lock_irq(&phba->hbalock);
1607 phba->link_state = LPFC_HBA_ERROR;
1608 spin_unlock_irq(&phba->hbalock);
1609
James Smart618a5232012-06-12 13:54:36 -04001610 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
James Smartc00f62e2019-08-14 16:57:11 -07001611 lpfc_sli_flush_io_rings(phba);
James Smartda0436e2009-05-22 14:51:39 -04001612 lpfc_offline(phba);
James Smartda0436e2009-05-22 14:51:39 -04001613 lpfc_hba_down_post(phba);
James Smartda0436e2009-05-22 14:51:39 -04001614 lpfc_unblock_mgmt_io(phba);
James Smartda0436e2009-05-22 14:51:39 -04001615}
1616
1617/**
James Smarta257bf92009-04-06 18:48:10 -04001618 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1619 * @phba: pointer to lpfc hba data structure.
1620 *
1621 * This routine is invoked to handle the deferred HBA hardware error
1622 * conditions. This type of error is indicated by the HBA setting ER1
1623 * and another ER bit in the host status register. The driver will
1624 * wait until the ER1 bit clears before handling the error condition.
1625 **/
1626static void
1627lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1628{
1629 uint32_t old_host_status = phba->work_hs;
James Smarta257bf92009-04-06 18:48:10 -04001630 struct lpfc_sli *psli = &phba->sli;
1631
James Smartf4b4c682009-05-22 14:53:12 -04001632 /* If the pci channel is offline, ignore possible errors,
1633 * since we cannot communicate with the pci card anyway.
1634 */
1635 if (pci_channel_offline(phba->pcidev)) {
1636 spin_lock_irq(&phba->hbalock);
1637 phba->hba_flag &= ~DEFER_ERATT;
1638 spin_unlock_irq(&phba->hbalock);
1639 return;
1640 }
1641
Dick Kennedy372c1872020-06-30 14:50:00 -07001642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1643 "0479 Deferred Adapter Hardware Error "
1644 "Data: x%x x%x x%x\n",
1645 phba->work_hs, phba->work_status[0],
1646 phba->work_status[1]);
James Smarta257bf92009-04-06 18:48:10 -04001647
1648 spin_lock_irq(&phba->hbalock);
James Smartf4b4c682009-05-22 14:53:12 -04001649 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
James Smarta257bf92009-04-06 18:48:10 -04001650 spin_unlock_irq(&phba->hbalock);
1651
1652
1653 /*
1654 * Firmware stops when it triggers an error attention. That could cause
1655 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os) on the
1656 * txcmplq and let the SCSI layer retry them after re-establishing the link.
1657 */
James Smartdb55fba2014-04-04 13:52:02 -04001658 lpfc_sli_abort_fcp_rings(phba);
James Smarta257bf92009-04-06 18:48:10 -04001659
1660 /*
1661 * There was a firmware error. Take the hba offline and then
1662 * attempt to restart it.
1663 */
James Smart618a5232012-06-12 13:54:36 -04001664 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
James Smarta257bf92009-04-06 18:48:10 -04001665 lpfc_offline(phba);
1666
1667 /* Wait for the ER1 bit to clear.*/
1668 while (phba->work_hs & HS_FFER1) {
1669 msleep(100);
James Smart9940b972011-03-11 16:06:12 -05001670 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1671 phba->work_hs = UNPLUG_ERR ;
1672 break;
1673 }
James Smarta257bf92009-04-06 18:48:10 -04001674 /* If driver is unloading let the worker thread continue */
1675 if (phba->pport->load_flag & FC_UNLOADING) {
1676 phba->work_hs = 0;
1677 break;
1678 }
1679 }
1680
1681 /*
1682 * This is to protect against a race condition in which the
1683 * first write to the host attention register clears the
1684 * host status register.
1685 */
1686 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1687 phba->work_hs = old_host_status & ~HS_FFER1;
1688
James Smart3772a992009-05-22 14:50:54 -04001689 spin_lock_irq(&phba->hbalock);
James Smarta257bf92009-04-06 18:48:10 -04001690 phba->hba_flag &= ~DEFER_ERATT;
James Smart3772a992009-05-22 14:50:54 -04001691 spin_unlock_irq(&phba->hbalock);
James Smarta257bf92009-04-06 18:48:10 -04001692 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1693 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1694}
1695
James Smart3772a992009-05-22 14:50:54 -04001696static void
1697lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1698{
1699 struct lpfc_board_event_header board_event;
1700 struct Scsi_Host *shost;
1701
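	/* Build a board-level error event and post it to the FC transport
	 * as a vendor-unique event for management applications.
	 */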
1702 board_event.event_type = FC_REG_BOARD_EVENT;
1703 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1704 shost = lpfc_shost_from_vport(phba->pport);
1705 fc_host_post_vendor_event(shost, fc_get_event_number(),
1706 sizeof(board_event),
1707 (char *) &board_event,
1708 LPFC_NL_VENDOR_ID);
1709}
1710
James Smarta257bf92009-04-06 18:48:10 -04001711/**
James Smart3772a992009-05-22 14:50:54 -04001712 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
James Smarte59058c2008-08-24 21:49:00 -04001713 * @phba: pointer to lpfc hba data structure.
1714 *
1715 * This routine is invoked to handle the following HBA hardware error
1716 * conditions:
1717 * 1 - HBA error attention interrupt
1718 * 2 - DMA ring index out of range
1719 * 3 - Mailbox command came back as unknown
1720 **/
James Smart3772a992009-05-22 14:50:54 -04001721static void
1722lpfc_handle_eratt_s3(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05001723{
James Smart2e0fef82007-06-17 19:56:36 -05001724 struct lpfc_vport *vport = phba->pport;
James Smart2e0fef82007-06-17 19:56:36 -05001725 struct lpfc_sli *psli = &phba->sli;
James Smartd2873e42006-08-18 17:46:43 -04001726 uint32_t event_data;
James Smart57127f12007-10-27 13:37:05 -04001727 unsigned long temperature;
1728 struct temp_event temp_event_data;
James Smart92d7f7b2007-06-17 19:56:38 -05001729 struct Scsi_Host *shost;
James Smart2e0fef82007-06-17 19:56:36 -05001730
Linas Vepstas8d63f372007-02-14 14:28:36 -06001731 /* If the pci channel is offline, ignore possible errors,
James Smart3772a992009-05-22 14:50:54 -04001732 * since we cannot communicate with the pci card anyway.
1733 */
1734 if (pci_channel_offline(phba->pcidev)) {
1735 spin_lock_irq(&phba->hbalock);
1736 phba->hba_flag &= ~DEFER_ERATT;
1737 spin_unlock_irq(&phba->hbalock);
Linas Vepstas8d63f372007-02-14 14:28:36 -06001738 return;
James Smart3772a992009-05-22 14:50:54 -04001739 }
1740
James Smart13815c82008-01-11 01:52:48 -05001741 /* If resets are disabled then leave the HBA alone and return */
1742 if (!phba->cfg_enable_hba_reset)
1743 return;
dea31012005-04-17 16:05:31 -05001744
James Smartea2151b2008-09-07 11:52:10 -04001745 /* Send an internal error event to mgmt application */
James Smart3772a992009-05-22 14:50:54 -04001746 lpfc_board_errevt_to_mgmt(phba);
James Smartea2151b2008-09-07 11:52:10 -04001747
James Smarta257bf92009-04-06 18:48:10 -04001748 if (phba->hba_flag & DEFER_ERATT)
1749 lpfc_handle_deferred_eratt(phba);
1750
James Smartdcf2a4e2010-09-29 11:18:53 -04001751 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1752 if (phba->work_hs & HS_FFER6)
1753 /* Re-establishing Link */
1754 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1755 "1301 Re-establishing Link "
1756 "Data: x%x x%x x%x\n",
1757 phba->work_hs, phba->work_status[0],
1758 phba->work_status[1]);
1759 if (phba->work_hs & HS_FFER8)
1760 /* Device Zeroization */
1761 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1762 "2861 Host Authentication device "
1763 "zeroization Data:x%x x%x x%x\n",
1764 phba->work_hs, phba->work_status[0],
1765 phba->work_status[1]);
James Smart58da1ff2008-04-07 10:15:56 -04001766
James Smart92d7f7b2007-06-17 19:56:38 -05001767 spin_lock_irq(&phba->hbalock);
James Smartf4b4c682009-05-22 14:53:12 -04001768 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart92d7f7b2007-06-17 19:56:38 -05001769 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05001770
1771 /*
1772 * Firmware stops when it triggers an error attention with HS_FFER6.
1773 * That could cause I/Os to be dropped by the firmware.
1774 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
1775 * retry them after re-establishing the link.
1776 */
James Smartdb55fba2014-04-04 13:52:02 -04001777 lpfc_sli_abort_fcp_rings(phba);
dea31012005-04-17 16:05:31 -05001778
dea31012005-04-17 16:05:31 -05001779 /*
1780 * There was a firmware error. Take the hba offline and then
1781 * attempt to restart it.
1782 */
James Smart618a5232012-06-12 13:54:36 -04001783 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
dea31012005-04-17 16:05:31 -05001784 lpfc_offline(phba);
Jamie Wellnitz41415862006-02-28 19:25:27 -05001785 lpfc_sli_brdrestart(phba);
dea31012005-04-17 16:05:31 -05001786 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
James Smart46fa3112007-04-25 09:51:45 -04001787 lpfc_unblock_mgmt_io(phba);
dea31012005-04-17 16:05:31 -05001788 return;
1789 }
James Smart46fa3112007-04-25 09:51:45 -04001790 lpfc_unblock_mgmt_io(phba);
James Smart57127f12007-10-27 13:37:05 -04001791 } else if (phba->work_hs & HS_CRIT_TEMP) {
1792 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1793 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1794 temp_event_data.event_code = LPFC_CRIT_TEMP;
1795 temp_event_data.data = (uint32_t)temperature;
1796
Dick Kennedy372c1872020-06-30 14:50:00 -07001797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartd7c255b2008-08-24 21:50:00 -04001798 "0406 Adapter maximum temperature exceeded "
James Smart57127f12007-10-27 13:37:05 -04001799 "(%ld), taking this port offline "
1800 "Data: x%x x%x x%x\n",
1801 temperature, phba->work_hs,
1802 phba->work_status[0], phba->work_status[1]);
1803
1804 shost = lpfc_shost_from_vport(phba->pport);
1805 fc_host_post_vendor_event(shost, fc_get_event_number(),
1806 sizeof(temp_event_data),
1807 (char *) &temp_event_data,
1808 SCSI_NL_VID_TYPE_PCI
1809 | PCI_VENDOR_ID_EMULEX);
1810
James Smart7af67052007-10-27 13:38:11 -04001811 spin_lock_irq(&phba->hbalock);
James Smart7af67052007-10-27 13:38:11 -04001812 phba->over_temp_state = HBA_OVER_TEMP;
1813 spin_unlock_irq(&phba->hbalock);
James Smart09372822008-01-11 01:52:54 -05001814 lpfc_offline_eratt(phba);
James Smart57127f12007-10-27 13:37:05 -04001815
dea31012005-04-17 16:05:31 -05001816 } else {
1817 /* The if clause above forces this code path when the status
James Smart93996272008-08-24 21:50:30 -04001818 * failure is a value other than FFER6. Do not take the port offline
1819 * twice. This is the adapter hardware error path.
dea31012005-04-17 16:05:31 -05001820 */
Dick Kennedy372c1872020-06-30 14:50:00 -07001821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte8b62012007-08-02 11:10:09 -04001822 "0457 Adapter Hardware Error "
dea31012005-04-17 16:05:31 -05001823 "Data: x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04001824 phba->work_hs,
dea31012005-04-17 16:05:31 -05001825 phba->work_status[0], phba->work_status[1]);
1826
James Smartd2873e42006-08-18 17:46:43 -04001827 event_data = FC_REG_DUMP_EVENT;
James Smart92d7f7b2007-06-17 19:56:38 -05001828 shost = lpfc_shost_from_vport(vport);
James Smart2e0fef82007-06-17 19:56:36 -05001829 fc_host_post_vendor_event(shost, fc_get_event_number(),
James Smartd2873e42006-08-18 17:46:43 -04001830 sizeof(event_data), (char *) &event_data,
1831 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1832
James Smart09372822008-01-11 01:52:54 -05001833 lpfc_offline_eratt(phba);
dea31012005-04-17 16:05:31 -05001834 }
James Smart93996272008-08-24 21:50:30 -04001835 return;
dea31012005-04-17 16:05:31 -05001836}
1837
James Smarte59058c2008-08-24 21:49:00 -04001838/**
James Smart618a5232012-06-12 13:54:36 -04001839 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1840 * @phba: pointer to lpfc hba data structure.
1841 * @mbx_action: flag for mailbox shutdown action.
Lee Jonesfe614ac2020-07-23 13:24:22 +01001842 * @en_rn_msg: send reset/port recovery message.
 *
James Smart618a5232012-06-12 13:54:36 -04001843 * This routine is invoked to perform an SLI4 port PCI function reset in
1844 * response to port status register polling attention. It waits for port
1845 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1846 * During this process, interrupt vectors are freed and later requested
1847 * for handling possible port resource change.
1848 **/
1849static int
James Smarte10b2022014-02-20 09:57:43 -05001850lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1851 bool en_rn_msg)
James Smart618a5232012-06-12 13:54:36 -04001852{
1853 int rc;
1854 uint32_t intr_mode;
James Smarta9978e32021-07-07 11:43:43 -07001855 LPFC_MBOXQ_t *mboxq;
James Smart618a5232012-06-12 13:54:36 -04001856
James Smart27d6ac02018-02-22 08:18:42 -08001857 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
James Smart65791f12016-07-06 12:35:56 -07001858 LPFC_SLI_INTF_IF_TYPE_2) {
1859 /*
1860 * On error status condition, driver need to wait for port
1861 * ready before performing reset.
1862 */
1863 rc = lpfc_sli4_pdev_status_reg_wait(phba);
James Smart0e916ee2016-07-06 12:36:06 -07001864 if (rc)
James Smart65791f12016-07-06 12:35:56 -07001865 return rc;
James Smart618a5232012-06-12 13:54:36 -04001866 }
James Smart0e916ee2016-07-06 12:36:06 -07001867
James Smart65791f12016-07-06 12:35:56 -07001868 /* need reset: attempt for port recovery */
1869 if (en_rn_msg)
James Smart0b3ad322021-01-04 10:02:39 -08001870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart65791f12016-07-06 12:35:56 -07001871 "2887 Reset Needed: Attempting Port "
1872 "Recovery...\n");
James Smart3ba62162021-01-04 10:02:31 -08001873
1874 /* If in no-wait mode, the HBA has been reset and is not
James Smarta9978e32021-07-07 11:43:43 -07001875 * functional, thus we should clear the
1876 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
James Smart3ba62162021-01-04 10:02:31 -08001877 */
1878 if (mbx_action == LPFC_MBX_NO_WAIT) {
1879 spin_lock_irq(&phba->hbalock);
1880 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
James Smarta9978e32021-07-07 11:43:43 -07001881 if (phba->sli.mbox_active) {
1882 mboxq = phba->sli.mbox_active;
1883 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1884 __lpfc_mbox_cmpl_put(phba, mboxq);
1885 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1886 phba->sli.mbox_active = NULL;
1887 }
James Smart3ba62162021-01-04 10:02:31 -08001888 spin_unlock_irq(&phba->hbalock);
1889 }
1890
James Smart65791f12016-07-06 12:35:56 -07001891 lpfc_offline_prep(phba, mbx_action);
James Smartc00f62e2019-08-14 16:57:11 -07001892 lpfc_sli_flush_io_rings(phba);
James Smart65791f12016-07-06 12:35:56 -07001893 lpfc_offline(phba);
1894 /* release interrupt for possible resource change */
1895 lpfc_sli4_disable_intr(phba);
James Smart5a9eeff2018-11-29 16:09:32 -08001896 rc = lpfc_sli_brdrestart(phba);
1897 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07001898 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart5a9eeff2018-11-29 16:09:32 -08001899 "6309 Failed to restart board\n");
1900 return rc;
1901 }
James Smart65791f12016-07-06 12:35:56 -07001902 /* request and enable interrupt */
1903 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1904 if (intr_mode == LPFC_INTR_ERROR) {
Dick Kennedy372c1872020-06-30 14:50:00 -07001905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart65791f12016-07-06 12:35:56 -07001906 "3175 Failed to enable interrupt\n");
1907 return -EIO;
1908 }
1909 phba->intr_mode = intr_mode;
1910 rc = lpfc_online(phba);
1911 if (rc == 0)
1912 lpfc_unblock_mgmt_io(phba);
1913
James Smart618a5232012-06-12 13:54:36 -04001914 return rc;
1915}
1916
1917/**
James Smartda0436e2009-05-22 14:51:39 -04001918 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1919 * @phba: pointer to lpfc hba data structure.
1920 *
1921 * This routine is invoked to handle the SLI4 HBA hardware error attention
1922 * conditions.
1923 **/
1924static void
1925lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1926{
1927 struct lpfc_vport *vport = phba->pport;
1928 uint32_t event_data;
1929 struct Scsi_Host *shost;
James Smart2fcee4b2010-12-15 17:57:46 -05001930 uint32_t if_type;
James Smart2e90f4b2011-12-13 13:22:37 -05001931 struct lpfc_register portstat_reg = {0};
1932 uint32_t reg_err1, reg_err2;
1933 uint32_t uerrlo_reg, uemasklo_reg;
James Smart65791f12016-07-06 12:35:56 -07001934 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
James Smarte10b2022014-02-20 09:57:43 -05001935 bool en_rn_msg = true;
James Smart946727d2015-04-07 15:07:09 -04001936 struct temp_event temp_event_data;
James Smart65791f12016-07-06 12:35:56 -07001937 struct lpfc_register portsmphr_reg;
1938 int rc, i;
James Smartda0436e2009-05-22 14:51:39 -04001939
1940 /* If the pci channel is offline, ignore possible errors, since
1941 * we cannot communicate with the pci card anyway.
1942 */
James Smart32a93102019-03-12 16:30:13 -07001943 if (pci_channel_offline(phba->pcidev)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07001944 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart32a93102019-03-12 16:30:13 -07001945 "3166 pci channel is offline\n");
1946 lpfc_sli4_offline_eratt(phba);
James Smartda0436e2009-05-22 14:51:39 -04001947 return;
James Smart32a93102019-03-12 16:30:13 -07001948 }
James Smartda0436e2009-05-22 14:51:39 -04001949
James Smart65791f12016-07-06 12:35:56 -07001950 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
James Smart2fcee4b2010-12-15 17:57:46 -05001951 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1952 switch (if_type) {
1953 case LPFC_SLI_INTF_IF_TYPE_0:
James Smart2e90f4b2011-12-13 13:22:37 -05001954 pci_rd_rc1 = lpfc_readl(
1955 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1956 &uerrlo_reg);
1957 pci_rd_rc2 = lpfc_readl(
1958 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1959 &uemasklo_reg);
1960 /* consider PCI bus read error as pci_channel_offline */
1961 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1962 return;
James Smart65791f12016-07-06 12:35:56 -07001963 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1964 lpfc_sli4_offline_eratt(phba);
1965 return;
1966 }
Dick Kennedy372c1872020-06-30 14:50:00 -07001967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart65791f12016-07-06 12:35:56 -07001968 "7623 Checking UE recoverable");
1969
1970 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1971 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1972 &portsmphr_reg.word0))
1973 continue;
1974
1975 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1976 &portsmphr_reg);
1977 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1978 LPFC_PORT_SEM_UE_RECOVERABLE)
1979 break;
1980 /* Sleep for 1 sec before checking the SEMAPHORE */
1981 msleep(1000);
1982 }
1983
Dick Kennedy372c1872020-06-30 14:50:00 -07001984 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart65791f12016-07-06 12:35:56 -07001985 "4827 smphr_port_status x%x : Waited %dSec",
1986 smphr_port_status, i);
1987
1988 /* Recoverable UE, reset the HBA device */
1989 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1990 LPFC_PORT_SEM_UE_RECOVERABLE) {
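			/* Poll for up to 20 seconds for the port to report
			 * READY, then attempt the PCI function reset.
			 */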
1991 for (i = 0; i < 20; i++) {
1992 msleep(1000);
1993 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1994 &portsmphr_reg.word0) &&
1995 (LPFC_POST_STAGE_PORT_READY ==
1996 bf_get(lpfc_port_smphr_port_status,
1997 &portsmphr_reg))) {
1998 rc = lpfc_sli4_port_sta_fn_reset(phba,
1999 LPFC_MBX_NO_WAIT, en_rn_msg);
2000 if (rc == 0)
2001 return;
Dick Kennedy372c1872020-06-30 14:50:00 -07002002 lpfc_printf_log(phba, KERN_ERR,
2003 LOG_TRACE_EVENT,
James Smart65791f12016-07-06 12:35:56 -07002004 "4215 Failed to recover UE");
2005 break;
2006 }
2007 }
2008 }
Dick Kennedy372c1872020-06-30 14:50:00 -07002009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart65791f12016-07-06 12:35:56 -07002010 "7624 Firmware not ready: Failing UE recovery,"
2011 " waited %dSec", i);
James Smart8c24a4f2019-08-14 16:56:53 -07002012 phba->link_state = LPFC_HBA_ERROR;
James Smart2fcee4b2010-12-15 17:57:46 -05002013 break;
James Smart946727d2015-04-07 15:07:09 -04002014
James Smart2fcee4b2010-12-15 17:57:46 -05002015 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart27d6ac02018-02-22 08:18:42 -08002016 case LPFC_SLI_INTF_IF_TYPE_6:
James Smart2e90f4b2011-12-13 13:22:37 -05002017 pci_rd_rc1 = lpfc_readl(
2018 phba->sli4_hba.u.if_type2.STATUSregaddr,
2019 &portstat_reg.word0);
2020 /* consider PCI bus read error as pci_channel_offline */
James Smart6b5151f2012-01-18 16:24:06 -05002021 if (pci_rd_rc1 == -EIO) {
Dick Kennedy372c1872020-06-30 14:50:00 -07002022 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6b5151f2012-01-18 16:24:06 -05002023 "3151 PCI bus read access failure: x%x\n",
2024 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
James Smart32a93102019-03-12 16:30:13 -07002025 lpfc_sli4_offline_eratt(phba);
James Smart2e90f4b2011-12-13 13:22:37 -05002026 return;
James Smart6b5151f2012-01-18 16:24:06 -05002027 }
James Smart2e90f4b2011-12-13 13:22:37 -05002028 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2029 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
James Smart2fcee4b2010-12-15 17:57:46 -05002030 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07002031 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2032 "2889 Port Overtemperature event, "
2033 "taking port offline Data: x%x x%x\n",
2034 reg_err1, reg_err2);
James Smart946727d2015-04-07 15:07:09 -04002035
James Smart310429e2016-07-06 12:35:54 -07002036 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
James Smart946727d2015-04-07 15:07:09 -04002037 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2038 temp_event_data.event_code = LPFC_CRIT_TEMP;
2039 temp_event_data.data = 0xFFFFFFFF;
2040
2041 shost = lpfc_shost_from_vport(phba->pport);
2042 fc_host_post_vendor_event(shost, fc_get_event_number(),
2043 sizeof(temp_event_data),
2044 (char *)&temp_event_data,
2045 SCSI_NL_VID_TYPE_PCI
2046 | PCI_VENDOR_ID_EMULEX);
2047
James Smart2fcee4b2010-12-15 17:57:46 -05002048 spin_lock_irq(&phba->hbalock);
2049 phba->over_temp_state = HBA_OVER_TEMP;
2050 spin_unlock_irq(&phba->hbalock);
2051 lpfc_sli4_offline_eratt(phba);
James Smart946727d2015-04-07 15:07:09 -04002052 return;
James Smart2fcee4b2010-12-15 17:57:46 -05002053 }
James Smart2e90f4b2011-12-13 13:22:37 -05002054 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
James Smarte10b2022014-02-20 09:57:43 -05002055 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
Dick Kennedy372c1872020-06-30 14:50:00 -07002056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarte10b2022014-02-20 09:57:43 -05002057 "3143 Port Down: Firmware Update "
2058 "Detected\n");
2059 en_rn_msg = false;
2060 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
James Smart2e90f4b2011-12-13 13:22:37 -05002061 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
Dick Kennedy372c1872020-06-30 14:50:00 -07002062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2e90f4b2011-12-13 13:22:37 -05002063 "3144 Port Down: Debug Dump\n");
2064 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2065 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
Dick Kennedy372c1872020-06-30 14:50:00 -07002066 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2e90f4b2011-12-13 13:22:37 -05002067 "3145 Port Down: Provisioning\n");
James Smart618a5232012-06-12 13:54:36 -04002068
James Smart946727d2015-04-07 15:07:09 -04002069 /* If resets are disabled then leave the HBA alone and return */
2070 if (!phba->cfg_enable_hba_reset)
2071 return;
2072
James Smart618a5232012-06-12 13:54:36 -04002073 /* Check port status register for function reset */
James Smarte10b2022014-02-20 09:57:43 -05002074 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2075 en_rn_msg);
James Smart618a5232012-06-12 13:54:36 -04002076 if (rc == 0) {
2077 /* don't report event on forced debug dump */
2078 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2079 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2080 return;
2081 else
2082 break;
James Smart2fcee4b2010-12-15 17:57:46 -05002083 }
James Smart618a5232012-06-12 13:54:36 -04002084 /* fall through for not able to recover */
Dick Kennedy372c1872020-06-30 14:50:00 -07002085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart8c24a4f2019-08-14 16:56:53 -07002086 "3152 Unrecoverable error\n");
2087 phba->link_state = LPFC_HBA_ERROR;
James Smart2fcee4b2010-12-15 17:57:46 -05002088 break;
2089 case LPFC_SLI_INTF_IF_TYPE_1:
2090 default:
2091 break;
2092 }
James Smart2e90f4b2011-12-13 13:22:37 -05002093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2094 "3123 Report dump event to upper layer\n");
2095 /* Send an internal error event to mgmt application */
2096 lpfc_board_errevt_to_mgmt(phba);
2097
2098 event_data = FC_REG_DUMP_EVENT;
2099 shost = lpfc_shost_from_vport(vport);
2100 fc_host_post_vendor_event(shost, fc_get_event_number(),
2101 sizeof(event_data), (char *) &event_data,
2102 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
James Smartda0436e2009-05-22 14:51:39 -04002103}
2104
2105/**
2106 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2107 * @phba: pointer to lpfc HBA data structure.
2108 *
2109 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2110 * routine from the API jump table function pointer from the lpfc_hba struct.
2111 *
2112 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002113 * 0 - success.
James Smartda0436e2009-05-22 14:51:39 -04002114 * Any other value - error.
2115 **/
2116void
2117lpfc_handle_eratt(struct lpfc_hba *phba)
2118{
2119 (*phba->lpfc_handle_eratt)(phba);
2120}
2121
2122/**
James Smart3621a712009-04-06 18:47:14 -04002123 * lpfc_handle_latt - The HBA link event handler
James Smarte59058c2008-08-24 21:49:00 -04002124 * @phba: pointer to lpfc hba data structure.
2125 *
2126 * This routine is invoked from the worker thread to handle a HBA host
James Smart895427b2017-02-12 13:52:30 -08002127 * attention link event. SLI3 only.
James Smarte59058c2008-08-24 21:49:00 -04002128 **/
dea31012005-04-17 16:05:31 -05002129void
James Smart2e0fef82007-06-17 19:56:36 -05002130lpfc_handle_latt(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05002131{
James Smart2e0fef82007-06-17 19:56:36 -05002132 struct lpfc_vport *vport = phba->pport;
2133 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05002134 LPFC_MBOXQ_t *pmb;
2135 volatile uint32_t control;
2136 struct lpfc_dmabuf *mp;
James Smart09372822008-01-11 01:52:54 -05002137 int rc = 0;
dea31012005-04-17 16:05:31 -05002138
2139 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
James Smart09372822008-01-11 01:52:54 -05002140 if (!pmb) {
2141 rc = 1;
dea31012005-04-17 16:05:31 -05002142 goto lpfc_handle_latt_err_exit;
James Smart09372822008-01-11 01:52:54 -05002143 }
dea31012005-04-17 16:05:31 -05002144
2145 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
James Smart09372822008-01-11 01:52:54 -05002146 if (!mp) {
2147 rc = 2;
dea31012005-04-17 16:05:31 -05002148 goto lpfc_handle_latt_free_pmb;
James Smart09372822008-01-11 01:52:54 -05002149 }
dea31012005-04-17 16:05:31 -05002150
2151 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
James Smart09372822008-01-11 01:52:54 -05002152 if (!mp->virt) {
2153 rc = 3;
dea31012005-04-17 16:05:31 -05002154 goto lpfc_handle_latt_free_mp;
James Smart09372822008-01-11 01:52:54 -05002155 }
dea31012005-04-17 16:05:31 -05002156
James.Smart@Emulex.Com6281bfe2005-11-28 11:41:33 -05002157 /* Cleanup any outstanding ELS commands */
James Smart549e55c2007-08-02 11:09:51 -04002158 lpfc_els_flush_all_cmd(phba);
dea31012005-04-17 16:05:31 -05002159
2160 psli->slistat.link_event++;
James Smart76a95d72010-11-20 23:11:48 -05002161 lpfc_read_topology(phba, pmb, mp);
2162 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
James Smart2e0fef82007-06-17 19:56:36 -05002163 pmb->vport = vport;
James Smart0d2b6b82008-06-14 22:52:47 -04002164 /* Block ELS IOCBs until we have processed this mbox command */
James Smart895427b2017-02-12 13:52:30 -08002165 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
James Smart0b727fe2007-10-27 13:37:25 -04002166 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
James Smart09372822008-01-11 01:52:54 -05002167 if (rc == MBX_NOT_FINISHED) {
2168 rc = 4;
James Smart14691152006-12-02 13:34:28 -05002169 goto lpfc_handle_latt_free_mbuf;
James Smart09372822008-01-11 01:52:54 -05002170 }
dea31012005-04-17 16:05:31 -05002171
2172 /* Clear Link Attention in HA REG */
James Smart2e0fef82007-06-17 19:56:36 -05002173 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05002174 writel(HA_LATT, phba->HAregaddr);
2175 readl(phba->HAregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05002176 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05002177
2178 return;
2179
James Smart14691152006-12-02 13:34:28 -05002180lpfc_handle_latt_free_mbuf:
James Smart895427b2017-02-12 13:52:30 -08002181 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
James Smart14691152006-12-02 13:34:28 -05002182 lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea31012005-04-17 16:05:31 -05002183lpfc_handle_latt_free_mp:
2184 kfree(mp);
2185lpfc_handle_latt_free_pmb:
James Smart1dcb58e2007-04-25 09:51:30 -04002186 mempool_free(pmb, phba->mbox_mem_pool);
dea31012005-04-17 16:05:31 -05002187lpfc_handle_latt_err_exit:
2188 /* Enable Link attention interrupts */
James Smart2e0fef82007-06-17 19:56:36 -05002189 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05002190 psli->sli_flag |= LPFC_PROCESS_LA;
2191 control = readl(phba->HCregaddr);
2192 control |= HC_LAINT_ENA;
2193 writel(control, phba->HCregaddr);
2194 readl(phba->HCregaddr); /* flush */
2195
2196 /* Clear Link Attention in HA REG */
2197 writel(HA_LATT, phba->HAregaddr);
2198 readl(phba->HAregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05002199 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05002200 lpfc_linkdown(phba);
James Smart2e0fef82007-06-17 19:56:36 -05002201 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05002202
Dick Kennedy372c1872020-06-30 14:50:00 -07002203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2204 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
dea31012005-04-17 16:05:31 -05002205
2206 return;
2207}
2208
James Smarte59058c2008-08-24 21:49:00 -04002209/**
James Smart3621a712009-04-06 18:47:14 -04002210 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
James Smarte59058c2008-08-24 21:49:00 -04002211 * @phba: pointer to lpfc hba data structure.
2212 * @vpd: pointer to the vital product data.
2213 * @len: length of the vital product data in bytes.
2214 *
2215 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2216 * an array of characters. In this routine, the ModelName, ProgramType, and
2217 * ModelDesc, etc. fields of the phba data structure will be populated.
2218 *
2219 * Return codes
2220 * 0 - pointer to the VPD passed in is NULL
2221 * 1 - success
2222 **/
James Smart3772a992009-05-22 14:50:54 -04002223int
James Smart2e0fef82007-06-17 19:56:36 -05002224lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
dea31012005-04-17 16:05:31 -05002225{
2226 uint8_t lenlo, lenhi;
Anton Blanchard07da60c2007-03-21 08:41:47 -05002227 int Length;
dea31012005-04-17 16:05:31 -05002228 int i, j;
2229 int finished = 0;
2230 int index = 0;
2231
2232 if (!vpd)
2233 return 0;
2234
2235 /* Vital Product */
James Smarted957682007-06-17 19:56:37 -05002236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04002237 "0455 Vital Product Data: x%x x%x x%x x%x\n",
dea31012005-04-17 16:05:31 -05002238 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2239 (uint32_t) vpd[3]);
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002240 while (!finished && (index < (len - 4))) {
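		/* Walk the VPD as a sequence of tagged records: tags 0x82 and
		 * 0x91 carry a two-byte length and are skipped, tag 0x90
		 * holds the keyword fields (SN, V1-V4) parsed below, and tag
		 * 0x78 ends the VPD.
		 */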
dea31012005-04-17 16:05:31 -05002241 switch (vpd[index]) {
2242 case 0x82:
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002243 case 0x91:
dea31012005-04-17 16:05:31 -05002244 index += 1;
2245 lenlo = vpd[index];
2246 index += 1;
2247 lenhi = vpd[index];
2248 index += 1;
2249 i = ((((unsigned short)lenhi) << 8) + lenlo);
2250 index += i;
2251 break;
2252 case 0x90:
2253 index += 1;
2254 lenlo = vpd[index];
2255 index += 1;
2256 lenhi = vpd[index];
2257 index += 1;
2258 Length = ((((unsigned short)lenhi) << 8) + lenlo);
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002259 if (Length > len - index)
2260 Length = len - index;
dea31012005-04-17 16:05:31 -05002261 while (Length > 0) {
2262 /* Look for Serial Number */
2263 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2264 index += 2;
2265 i = vpd[index];
2266 index += 1;
2267 j = 0;
2268 Length -= (3+i);
2269 while(i--) {
2270 phba->SerialNumber[j++] = vpd[index++];
2271 if (j == 31)
2272 break;
2273 }
2274 phba->SerialNumber[j] = 0;
2275 continue;
2276 }
2277 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2278 phba->vpd_flag |= VPD_MODEL_DESC;
2279 index += 2;
2280 i = vpd[index];
2281 index += 1;
2282 j = 0;
2283 Length -= (3+i);
2284 while(i--) {
2285 phba->ModelDesc[j++] = vpd[index++];
2286 if (j == 255)
2287 break;
2288 }
2289 phba->ModelDesc[j] = 0;
2290 continue;
2291 }
2292 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2293 phba->vpd_flag |= VPD_MODEL_NAME;
2294 index += 2;
2295 i = vpd[index];
2296 index += 1;
2297 j = 0;
2298 Length -= (3+i);
2299 while(i--) {
2300 phba->ModelName[j++] = vpd[index++];
2301 if (j == 79)
2302 break;
2303 }
2304 phba->ModelName[j] = 0;
2305 continue;
2306 }
2307 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2308 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2309 index += 2;
2310 i = vpd[index];
2311 index += 1;
2312 j = 0;
2313 Length -= (3+i);
2314 while(i--) {
2315 phba->ProgramType[j++] = vpd[index++];
2316 if (j == 255)
2317 break;
2318 }
2319 phba->ProgramType[j] = 0;
2320 continue;
2321 }
2322 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2323 phba->vpd_flag |= VPD_PORT;
2324 index += 2;
2325 i = vpd[index];
2326 index += 1;
2327 j = 0;
2328 Length -= (3+i);
2329 while(i--) {
James Smartcd1c8302011-10-10 21:33:25 -04002330 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2331 (phba->sli4_hba.pport_name_sta ==
2332 LPFC_SLI4_PPNAME_GET)) {
2333 j++;
2334 index++;
2335 } else
2336 phba->Port[j++] = vpd[index++];
2337 if (j == 19)
2338 break;
dea31012005-04-17 16:05:31 -05002339 }
James Smartcd1c8302011-10-10 21:33:25 -04002340 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2341 (phba->sli4_hba.pport_name_sta ==
2342 LPFC_SLI4_PPNAME_NON))
2343 phba->Port[j] = 0;
dea31012005-04-17 16:05:31 -05002344 continue;
2345 }
2346 else {
2347 index += 2;
2348 i = vpd[index];
2349 index += 1;
2350 index += i;
2351 Length -= (3 + i);
2352 }
2353 }
2354 finished = 0;
2355 break;
2356 case 0x78:
2357 finished = 1;
2358 break;
2359 default:
2360 index ++;
2361 break;
2362 }
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002363 }
dea31012005-04-17 16:05:31 -05002364
2365 return(1);
2366}
2367
James Smarte59058c2008-08-24 21:49:00 -04002368/**
James Smart3621a712009-04-06 18:47:14 -04002369 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
James Smarte59058c2008-08-24 21:49:00 -04002370 * @phba: pointer to lpfc hba data structure.
2371 * @mdp: pointer to the data structure to hold the derived model name.
2372 * @descp: pointer to the data structure to hold the derived description.
2373 *
2374 * This routine retrieves HBA's description based on its registered PCI device
2375 * ID. The @descp passed into this function points to an array of 256 chars. It
2376 * shall be returned with the model name, maximum speed, and the host bus type.
2377 * The @mdp passed into this function points to an array of 80 chars. When the
2378 * function returns, the @mdp will be filled with the model name.
2379 **/
dea31012005-04-17 16:05:31 -05002380static void
James Smart2e0fef82007-06-17 19:56:36 -05002381lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
dea31012005-04-17 16:05:31 -05002382{
2383 lpfc_vpd_t *vp;
James.Smart@Emulex.Comfefcb2b2005-11-28 15:08:56 -05002384 uint16_t dev_id = phba->pcidev->device;
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002385 int max_speed;
James Smart84774a42008-08-24 21:50:06 -04002386 int GE = 0;
James Smartda0436e2009-05-22 14:51:39 -04002387 int oneConnect = 0; /* default is not a oneConnect */
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002388 struct {
James Smarta747c9c2009-11-18 15:41:10 -05002389 char *name;
2390 char *bus;
2391 char *function;
2392 } m = {"<Unknown>", "", ""};
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002393
2394 if (mdp && mdp[0] != '\0'
2395 && descp && descp[0] != '\0')
2396 return;
2397
James Smartfbd8a6b2018-02-22 08:18:45 -08002398 if (phba->lmt & LMT_64Gb)
2399 max_speed = 64;
2400 else if (phba->lmt & LMT_32Gb)
James Smartd38dd522015-08-31 16:48:17 -04002401 max_speed = 32;
2402 else if (phba->lmt & LMT_16Gb)
James Smartc0c11512011-05-24 11:41:34 -04002403 max_speed = 16;
2404 else if (phba->lmt & LMT_10Gb)
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002405 max_speed = 10;
2406 else if (phba->lmt & LMT_8Gb)
2407 max_speed = 8;
2408 else if (phba->lmt & LMT_4Gb)
2409 max_speed = 4;
2410 else if (phba->lmt & LMT_2Gb)
2411 max_speed = 2;
James Smart4169d862012-09-29 11:32:09 -04002412 else if (phba->lmt & LMT_1Gb)
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002413 max_speed = 1;
James Smart4169d862012-09-29 11:32:09 -04002414 else
2415 max_speed = 0;
dea31012005-04-17 16:05:31 -05002416
2417 vp = &phba->vpd;
dea31012005-04-17 16:05:31 -05002418
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002419 switch (dev_id) {
James.Smart@Emulex.Com06325e72005-06-25 10:34:22 -04002420 case PCI_DEVICE_ID_FIREFLY:
James Smart12222f42014-05-21 08:05:19 -04002421 m = (typeof(m)){"LP6000", "PCI",
2422 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Com06325e72005-06-25 10:34:22 -04002423 break;
dea31012005-04-17 16:05:31 -05002424 case PCI_DEVICE_ID_SUPERFLY:
2425 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
James Smart12222f42014-05-21 08:05:19 -04002426 m = (typeof(m)){"LP7000", "PCI", ""};
dea31012005-04-17 16:05:31 -05002427 else
James Smart12222f42014-05-21 08:05:19 -04002428 m = (typeof(m)){"LP7000E", "PCI", ""};
2429 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea31012005-04-17 16:05:31 -05002430 break;
2431 case PCI_DEVICE_ID_DRAGONFLY:
James Smarta747c9c2009-11-18 15:41:10 -05002432 m = (typeof(m)){"LP8000", "PCI",
James Smart12222f42014-05-21 08:05:19 -04002433 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002434 break;
2435 case PCI_DEVICE_ID_CENTAUR:
2436 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
James Smart12222f42014-05-21 08:05:19 -04002437 m = (typeof(m)){"LP9002", "PCI", ""};
dea31012005-04-17 16:05:31 -05002438 else
James Smart12222f42014-05-21 08:05:19 -04002439 m = (typeof(m)){"LP9000", "PCI", ""};
2440 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea31012005-04-17 16:05:31 -05002441 break;
2442 case PCI_DEVICE_ID_RFLY:
James Smarta747c9c2009-11-18 15:41:10 -05002443 m = (typeof(m)){"LP952", "PCI",
James Smart12222f42014-05-21 08:05:19 -04002444 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002445 break;
2446 case PCI_DEVICE_ID_PEGASUS:
James Smarta747c9c2009-11-18 15:41:10 -05002447 m = (typeof(m)){"LP9802", "PCI-X",
James Smart12222f42014-05-21 08:05:19 -04002448 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002449 break;
2450 case PCI_DEVICE_ID_THOR:
James Smarta747c9c2009-11-18 15:41:10 -05002451 m = (typeof(m)){"LP10000", "PCI-X",
James Smart12222f42014-05-21 08:05:19 -04002452 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002453 break;
2454 case PCI_DEVICE_ID_VIPER:
James Smarta747c9c2009-11-18 15:41:10 -05002455 m = (typeof(m)){"LPX1000", "PCI-X",
James Smart12222f42014-05-21 08:05:19 -04002456 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002457 break;
2458 case PCI_DEVICE_ID_PFLY:
James Smarta747c9c2009-11-18 15:41:10 -05002459 m = (typeof(m)){"LP982", "PCI-X",
James Smart12222f42014-05-21 08:05:19 -04002460 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002461 break;
2462 case PCI_DEVICE_ID_TFLY:
James Smarta747c9c2009-11-18 15:41:10 -05002463 m = (typeof(m)){"LP1050", "PCI-X",
James Smart12222f42014-05-21 08:05:19 -04002464 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002465 break;
2466 case PCI_DEVICE_ID_HELIOS:
James Smarta747c9c2009-11-18 15:41:10 -05002467 m = (typeof(m)){"LP11000", "PCI-X2",
James Smart12222f42014-05-21 08:05:19 -04002468 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002469 break;
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002470 case PCI_DEVICE_ID_HELIOS_SCSP:
James Smarta747c9c2009-11-18 15:41:10 -05002471 m = (typeof(m)){"LP11000-SP", "PCI-X2",
James Smart12222f42014-05-21 08:05:19 -04002472 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002473 break;
2474 case PCI_DEVICE_ID_HELIOS_DCSP:
James Smarta747c9c2009-11-18 15:41:10 -05002475 m = (typeof(m)){"LP11002-SP", "PCI-X2",
James Smart12222f42014-05-21 08:05:19 -04002476 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002477 break;
2478 case PCI_DEVICE_ID_NEPTUNE:
James Smart12222f42014-05-21 08:05:19 -04002479 m = (typeof(m)){"LPe1000", "PCIe",
2480 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002481 break;
2482 case PCI_DEVICE_ID_NEPTUNE_SCSP:
James Smart12222f42014-05-21 08:05:19 -04002483 m = (typeof(m)){"LPe1000-SP", "PCIe",
2484 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002485 break;
2486 case PCI_DEVICE_ID_NEPTUNE_DCSP:
James Smart12222f42014-05-21 08:05:19 -04002487 m = (typeof(m)){"LPe1002-SP", "PCIe",
2488 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002489 break;
dea31012005-04-17 16:05:31 -05002490 case PCI_DEVICE_ID_BMID:
James Smarta747c9c2009-11-18 15:41:10 -05002491 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002492 break;
2493 case PCI_DEVICE_ID_BSMB:
James Smart12222f42014-05-21 08:05:19 -04002494 m = (typeof(m)){"LP111", "PCI-X2",
2495 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002496 break;
2497 case PCI_DEVICE_ID_ZEPHYR:
James Smarta747c9c2009-11-18 15:41:10 -05002498 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002499 break;
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002500 case PCI_DEVICE_ID_ZEPHYR_SCSP:
James Smarta747c9c2009-11-18 15:41:10 -05002501 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002502 break;
2503 case PCI_DEVICE_ID_ZEPHYR_DCSP:
James Smarta747c9c2009-11-18 15:41:10 -05002504 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
James Smarta257bf92009-04-06 18:48:10 -04002505 GE = 1;
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002506 break;
dea31012005-04-17 16:05:31 -05002507 case PCI_DEVICE_ID_ZMID:
James Smarta747c9c2009-11-18 15:41:10 -05002508 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002509 break;
2510 case PCI_DEVICE_ID_ZSMB:
James Smarta747c9c2009-11-18 15:41:10 -05002511 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002512 break;
2513 case PCI_DEVICE_ID_LP101:
James Smart12222f42014-05-21 08:05:19 -04002514 m = (typeof(m)){"LP101", "PCI-X",
2515 "Obsolete, Unsupported Fibre Channel Adapter"};
dea31012005-04-17 16:05:31 -05002516 break;
2517 case PCI_DEVICE_ID_LP10000S:
James Smart12222f42014-05-21 08:05:19 -04002518 m = (typeof(m)){"LP10000-S", "PCI",
2519 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Com06325e72005-06-25 10:34:22 -04002520 break;
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002521 case PCI_DEVICE_ID_LP11000S:
James Smart12222f42014-05-21 08:05:19 -04002522 m = (typeof(m)){"LP11000-S", "PCI-X2",
2523 "Obsolete, Unsupported Fibre Channel Adapter"};
James Smart18a3b592006-12-02 13:35:08 -05002524 break;
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002525 case PCI_DEVICE_ID_LPE11000S:
James Smart12222f42014-05-21 08:05:19 -04002526 m = (typeof(m)){"LPe11000-S", "PCIe",
2527 "Obsolete, Unsupported Fibre Channel Adapter"};
James.Smart@Emulex.Com5cc36b32005-11-28 11:42:19 -05002528 break;
James Smartb87eab32007-04-25 09:53:28 -04002529 case PCI_DEVICE_ID_SAT:
James Smarta747c9c2009-11-18 15:41:10 -05002530 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
James Smartb87eab32007-04-25 09:53:28 -04002531 break;
2532 case PCI_DEVICE_ID_SAT_MID:
James Smarta747c9c2009-11-18 15:41:10 -05002533 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
James Smartb87eab32007-04-25 09:53:28 -04002534 break;
2535 case PCI_DEVICE_ID_SAT_SMB:
James Smarta747c9c2009-11-18 15:41:10 -05002536 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
James Smartb87eab32007-04-25 09:53:28 -04002537 break;
2538 case PCI_DEVICE_ID_SAT_DCSP:
James Smarta747c9c2009-11-18 15:41:10 -05002539 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
James Smartb87eab32007-04-25 09:53:28 -04002540 break;
2541 case PCI_DEVICE_ID_SAT_SCSP:
James Smarta747c9c2009-11-18 15:41:10 -05002542 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
James Smartb87eab32007-04-25 09:53:28 -04002543 break;
2544 case PCI_DEVICE_ID_SAT_S:
James Smarta747c9c2009-11-18 15:41:10 -05002545 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
James Smartb87eab32007-04-25 09:53:28 -04002546 break;
James Smart84774a42008-08-24 21:50:06 -04002547 case PCI_DEVICE_ID_HORNET:
James Smart12222f42014-05-21 08:05:19 -04002548 m = (typeof(m)){"LP21000", "PCIe",
2549 "Obsolete, Unsupported FCoE Adapter"};
James Smart84774a42008-08-24 21:50:06 -04002550 GE = 1;
2551 break;
2552 case PCI_DEVICE_ID_PROTEUS_VF:
James Smarta747c9c2009-11-18 15:41:10 -05002553 m = (typeof(m)){"LPev12000", "PCIe IOV",
James Smart12222f42014-05-21 08:05:19 -04002554 "Obsolete, Unsupported Fibre Channel Adapter"};
James Smart84774a42008-08-24 21:50:06 -04002555 break;
2556 case PCI_DEVICE_ID_PROTEUS_PF:
James Smarta747c9c2009-11-18 15:41:10 -05002557 m = (typeof(m)){"LPev12000", "PCIe IOV",
James Smart12222f42014-05-21 08:05:19 -04002558 "Obsolete, Unsupported Fibre Channel Adapter"};
James Smart84774a42008-08-24 21:50:06 -04002559 break;
2560 case PCI_DEVICE_ID_PROTEUS_S:
James Smarta747c9c2009-11-18 15:41:10 -05002561 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
James Smart12222f42014-05-21 08:05:19 -04002562 "Obsolete, Unsupported Fibre Channel Adapter"};
James Smart84774a42008-08-24 21:50:06 -04002563 break;
James Smartda0436e2009-05-22 14:51:39 -04002564 case PCI_DEVICE_ID_TIGERSHARK:
2565 oneConnect = 1;
James Smarta747c9c2009-11-18 15:41:10 -05002566 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
James Smartda0436e2009-05-22 14:51:39 -04002567 break;
James Smarta747c9c2009-11-18 15:41:10 -05002568 case PCI_DEVICE_ID_TOMCAT:
James Smart6669f9b2009-10-02 15:16:45 -04002569 oneConnect = 1;
James Smarta747c9c2009-11-18 15:41:10 -05002570 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2571 break;
2572 case PCI_DEVICE_ID_FALCON:
2573 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2574 "EmulexSecure Fibre"};
James Smart6669f9b2009-10-02 15:16:45 -04002575 break;
James Smart98fc5dd2010-06-07 15:24:29 -04002576 case PCI_DEVICE_ID_BALIUS:
2577 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
James Smart12222f42014-05-21 08:05:19 -04002578 "Obsolete, Unsupported Fibre Channel Adapter"};
James Smart98fc5dd2010-06-07 15:24:29 -04002579 break;
James Smart085c6472010-11-20 23:11:37 -05002580 case PCI_DEVICE_ID_LANCER_FC:
James Smartc0c11512011-05-24 11:41:34 -04002581 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
James Smart085c6472010-11-20 23:11:37 -05002582 break;
James Smart12222f42014-05-21 08:05:19 -04002583 case PCI_DEVICE_ID_LANCER_FC_VF:
2584 m = (typeof(m)){"LPe16000", "PCIe",
2585 "Obsolete, Unsupported Fibre Channel Adapter"};
2586 break;
James Smart085c6472010-11-20 23:11:37 -05002587 case PCI_DEVICE_ID_LANCER_FCOE:
2588 oneConnect = 1;
James Smart079b5c92011-08-21 21:48:49 -04002589 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
James Smart085c6472010-11-20 23:11:37 -05002590 break;
James Smart12222f42014-05-21 08:05:19 -04002591 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2592 oneConnect = 1;
2593 m = (typeof(m)){"OCe15100", "PCIe",
2594 "Obsolete, Unsupported FCoE"};
2595 break;
James Smartd38dd522015-08-31 16:48:17 -04002596 case PCI_DEVICE_ID_LANCER_G6_FC:
2597 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2598 break;
James Smartc238b9b2018-02-22 08:18:44 -08002599 case PCI_DEVICE_ID_LANCER_G7_FC:
2600 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2601 break;
James Smartf8cafd32012-08-03 12:42:51 -04002602 case PCI_DEVICE_ID_SKYHAWK:
2603 case PCI_DEVICE_ID_SKYHAWK_VF:
2604 oneConnect = 1;
2605 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2606 break;
James.Smart@Emulex.Com5cc36b32005-11-28 11:42:19 -05002607 default:
James Smarta747c9c2009-11-18 15:41:10 -05002608 m = (typeof(m)){"Unknown", "", ""};
James.Smart@Emulex.Come4adb202005-11-28 11:42:12 -05002609 break;
dea31012005-04-17 16:05:31 -05002610 }
Jamie Wellnitz74b72a52006-02-28 22:33:04 -05002611
2612 if (mdp && mdp[0] == '\0')
2613 snprintf(mdp, 79,"%s", m.name);
James Smartc0c11512011-05-24 11:41:34 -04002614 /*
 2615 * oneConnect HBAs require special processing; they are all initiators
James Smartda0436e2009-05-22 14:51:39 -04002616 * and we put the port number on the end
2617 */
2618 if (descp && descp[0] == '\0') {
2619 if (oneConnect)
2620 snprintf(descp, 255,
James Smart4169d862012-09-29 11:32:09 -04002621 "Emulex OneConnect %s, %s Initiator %s",
James Smarta747c9c2009-11-18 15:41:10 -05002622 m.name, m.function,
James Smartda0436e2009-05-22 14:51:39 -04002623 phba->Port);
James Smart4169d862012-09-29 11:32:09 -04002624 else if (max_speed == 0)
2625 snprintf(descp, 255,
Sebastian Herbszt290237d2015-08-31 16:48:08 -04002626 "Emulex %s %s %s",
James Smart4169d862012-09-29 11:32:09 -04002627 m.name, m.bus, m.function);
James Smartda0436e2009-05-22 14:51:39 -04002628 else
2629 snprintf(descp, 255,
2630 "Emulex %s %d%s %s %s",
James Smarta747c9c2009-11-18 15:41:10 -05002631 m.name, max_speed, (GE) ? "GE" : "Gb",
2632 m.bus, m.function);
James Smartda0436e2009-05-22 14:51:39 -04002633 }
dea31012005-04-17 16:05:31 -05002634}
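
/*
 * Illustrative sketch only (not part of the driver): how a caller might
 * size the buffers handed to lpfc_get_hba_model_desc() per the kernel-doc
 * above -- 80 bytes for the model name and 256 bytes for the description,
 * both starting out empty so the routine fills them in. The wrapper name
 * is hypothetical.
 */
static void __maybe_unused
lpfc_example_get_model_desc(struct lpfc_hba *phba)
{
	uint8_t model[80] = "";
	uint8_t desc[256] = "";

	lpfc_get_hba_model_desc(phba, model, desc);
	/* model now holds e.g. "LPe36000"; desc holds the full description */
}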
2635
James Smarte59058c2008-08-24 21:49:00 -04002636/**
James Smart3621a712009-04-06 18:47:14 -04002637 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
James Smarte59058c2008-08-24 21:49:00 -04002638 * @phba: pointer to lpfc hba data structure.
 2639 * @pring: pointer to an IOCB ring.
2640 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2641 *
2642 * This routine posts a given number of IOCBs with the associated DMA buffer
2643 * descriptors specified by the cnt argument to the given IOCB ring.
2644 *
2645 * Return codes
2646 * The number of IOCBs NOT able to be posted to the IOCB ring.
2647 **/
dea31012005-04-17 16:05:31 -05002648int
James Smart495a7142008-06-14 22:52:59 -04002649lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
dea31012005-04-17 16:05:31 -05002650{
2651 IOCB_t *icmd;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04002652 struct lpfc_iocbq *iocb;
dea31012005-04-17 16:05:31 -05002653 struct lpfc_dmabuf *mp1, *mp2;
2654
2655 cnt += pring->missbufcnt;
2656
2657 /* While there are buffers to post */
2658 while (cnt > 0) {
2659 /* Allocate buffer for command iocb */
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04002660 iocb = lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05002661 if (iocb == NULL) {
2662 pring->missbufcnt = cnt;
2663 return cnt;
2664 }
dea31012005-04-17 16:05:31 -05002665 icmd = &iocb->iocb;
2666
2667 /* 2 buffers can be posted per command */
2668 /* Allocate buffer to post */
2669 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2670 if (mp1)
James Smart98c9ea52007-10-27 13:37:33 -04002671 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2672 if (!mp1 || !mp1->virt) {
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002673 kfree(mp1);
James Bottomley604a3e32005-10-29 10:28:33 -05002674 lpfc_sli_release_iocbq(phba, iocb);
dea31012005-04-17 16:05:31 -05002675 pring->missbufcnt = cnt;
2676 return cnt;
2677 }
2678
2679 INIT_LIST_HEAD(&mp1->list);
2680 /* Allocate buffer to post */
2681 if (cnt > 1) {
2682 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2683 if (mp2)
2684 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2685 &mp2->phys);
James Smart98c9ea52007-10-27 13:37:33 -04002686 if (!mp2 || !mp2->virt) {
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002687 kfree(mp2);
dea31012005-04-17 16:05:31 -05002688 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2689 kfree(mp1);
James Bottomley604a3e32005-10-29 10:28:33 -05002690 lpfc_sli_release_iocbq(phba, iocb);
dea31012005-04-17 16:05:31 -05002691 pring->missbufcnt = cnt;
2692 return cnt;
2693 }
2694
2695 INIT_LIST_HEAD(&mp2->list);
2696 } else {
2697 mp2 = NULL;
2698 }
2699
2700 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2701 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2702 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2703 icmd->ulpBdeCount = 1;
2704 cnt--;
2705 if (mp2) {
2706 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2707 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2708 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2709 cnt--;
2710 icmd->ulpBdeCount = 2;
2711 }
2712
2713 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2714 icmd->ulpLe = 1;
2715
James Smart3772a992009-05-22 14:50:54 -04002716 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2717 IOCB_ERROR) {
dea31012005-04-17 16:05:31 -05002718 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2719 kfree(mp1);
2720 cnt++;
2721 if (mp2) {
2722 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2723 kfree(mp2);
2724 cnt++;
2725 }
James Bottomley604a3e32005-10-29 10:28:33 -05002726 lpfc_sli_release_iocbq(phba, iocb);
dea31012005-04-17 16:05:31 -05002727 pring->missbufcnt = cnt;
dea31012005-04-17 16:05:31 -05002728 return cnt;
2729 }
dea31012005-04-17 16:05:31 -05002730 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
James Smart92d7f7b2007-06-17 19:56:38 -05002731 if (mp2)
dea31012005-04-17 16:05:31 -05002732 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
dea31012005-04-17 16:05:31 -05002733 }
2734 pring->missbufcnt = 0;
2735 return 0;
2736}
2737
James Smarte59058c2008-08-24 21:49:00 -04002738/**
James Smart3621a712009-04-06 18:47:14 -04002739 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
James Smarte59058c2008-08-24 21:49:00 -04002740 * @phba: pointer to lpfc hba data structure.
2741 *
2742 * This routine posts initial receive IOCB buffers to the ELS ring. The
2743 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
James Smart895427b2017-02-12 13:52:30 -08002744 * set to 64 IOCBs. SLI3 only.
James Smarte59058c2008-08-24 21:49:00 -04002745 *
2746 * Return codes
2747 * 0 - success (currently always success)
2748 **/
dea31012005-04-17 16:05:31 -05002749static int
James Smart2e0fef82007-06-17 19:56:36 -05002750lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05002751{
2752 struct lpfc_sli *psli = &phba->sli;
2753
2754 /* Ring 0, ELS / CT buffers */
James Smart895427b2017-02-12 13:52:30 -08002755 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea31012005-04-17 16:05:31 -05002756 /* Ring 2 - FCP no buffers needed */
2757
2758 return 0;
2759}
2760
2761#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
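/*
 * S(N, V) is a 32-bit rotate-left of V by N bits; it is the rotation
 * primitive used by the SHA-1 style rounds in lpfc_sha_iterate() below.
 * For example, S(1, 0x80000000) wraps the top bit around to bit 0.
 */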
2762
James Smarte59058c2008-08-24 21:49:00 -04002763/**
James Smart3621a712009-04-06 18:47:14 -04002764 * lpfc_sha_init - Set up initial array of hash table entries
James Smarte59058c2008-08-24 21:49:00 -04002765 * @HashResultPointer: pointer to an array as hash table.
2766 *
2767 * This routine sets up the initial values to the array of hash table entries
2768 * for the LC HBAs.
2769 **/
dea31012005-04-17 16:05:31 -05002770static void
2771lpfc_sha_init(uint32_t * HashResultPointer)
2772{
2773 HashResultPointer[0] = 0x67452301;
2774 HashResultPointer[1] = 0xEFCDAB89;
2775 HashResultPointer[2] = 0x98BADCFE;
2776 HashResultPointer[3] = 0x10325476;
2777 HashResultPointer[4] = 0xC3D2E1F0;
2778}
2779
James Smarte59058c2008-08-24 21:49:00 -04002780/**
James Smart3621a712009-04-06 18:47:14 -04002781 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
James Smarte59058c2008-08-24 21:49:00 -04002782 * @HashResultPointer: pointer to an initial/result hash table.
 2783 * @HashWorkingPointer: pointer to a working hash table.
 2784 *
 2785 * This routine iterates an initial hash table pointed to by @HashResultPointer
 2786 * with the values from the working hash table pointed to by @HashWorkingPointer.
 2787 * The results are put back into the initial hash table and returned through
 2788 * @HashResultPointer as the result hash table.
2789 **/
dea31012005-04-17 16:05:31 -05002790static void
2791lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2792{
2793 int t;
2794 uint32_t TEMP;
2795 uint32_t A, B, C, D, E;
2796 t = 16;
2797 do {
2798 HashWorkingPointer[t] =
2799 S(1,
2800 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2801 8] ^
2802 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2803 } while (++t <= 79);
2804 t = 0;
2805 A = HashResultPointer[0];
2806 B = HashResultPointer[1];
2807 C = HashResultPointer[2];
2808 D = HashResultPointer[3];
2809 E = HashResultPointer[4];
2810
2811 do {
2812 if (t < 20) {
2813 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2814 } else if (t < 40) {
2815 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2816 } else if (t < 60) {
2817 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2818 } else {
2819 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2820 }
2821 TEMP += S(5, A) + E + HashWorkingPointer[t];
2822 E = D;
2823 D = C;
2824 C = S(30, B);
2825 B = A;
2826 A = TEMP;
2827 } while (++t <= 79);
2828
2829 HashResultPointer[0] += A;
2830 HashResultPointer[1] += B;
2831 HashResultPointer[2] += C;
2832 HashResultPointer[3] += D;
2833 HashResultPointer[4] += E;
2834
2835}
2836
James Smarte59058c2008-08-24 21:49:00 -04002837/**
James Smart3621a712009-04-06 18:47:14 -04002838 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
James Smarte59058c2008-08-24 21:49:00 -04002839 * @RandomChallenge: pointer to the entry of host challenge random number array.
2840 * @HashWorking: pointer to the entry of the working hash array.
2841 *
 2842 * This routine calculates the working hash array referred to by @HashWorking
 2843 * from the challenge random numbers associated with the host, referred to by
2844 * @RandomChallenge. The result is put into the entry of the working hash
2845 * array and returned by reference through @HashWorking.
2846 **/
dea31012005-04-17 16:05:31 -05002847static void
2848lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2849{
2850 *HashWorking = (*RandomChallenge ^ *HashWorking);
2851}
2852
James Smarte59058c2008-08-24 21:49:00 -04002853/**
James Smart3621a712009-04-06 18:47:14 -04002854 * lpfc_hba_init - Perform special handling for LC HBA initialization
James Smarte59058c2008-08-24 21:49:00 -04002855 * @phba: pointer to lpfc hba data structure.
2856 * @hbainit: pointer to an array of unsigned 32-bit integers.
2857 *
2858 * This routine performs the special handling for LC HBA initialization.
2859 **/
dea31012005-04-17 16:05:31 -05002860void
2861lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2862{
2863 int t;
2864 uint32_t *HashWorking;
James Smart2e0fef82007-06-17 19:56:36 -05002865 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea31012005-04-17 16:05:31 -05002866
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02002867 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea31012005-04-17 16:05:31 -05002868 if (!HashWorking)
2869 return;
2870
dea31012005-04-17 16:05:31 -05002871 HashWorking[0] = HashWorking[78] = *pwwnn++;
2872 HashWorking[1] = HashWorking[79] = *pwwnn;
2873
2874 for (t = 0; t < 7; t++)
2875 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2876
2877 lpfc_sha_init(hbainit);
2878 lpfc_sha_iterate(hbainit, HashWorking);
2879 kfree(HashWorking);
2880}
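
/*
 * Minimal standalone sketch (assumptions: a caller-provided two-word WWNN
 * and seven challenge words; the names below are hypothetical, not driver
 * fields). It shows the flow spread across the helpers above: seed the
 * 80-word schedule from the WWNN, XOR the challenge into the first seven
 * words, then run one SHA-1 style compression to produce the five-word
 * hbainit result.
 */
static void __maybe_unused
lpfc_example_hba_hash(uint32_t *hbainit, const uint32_t *wwnn,
		      const uint32_t *challenge)
{
	uint32_t working[80] = { 0 };
	int t;

	working[0] = working[78] = wwnn[0];
	working[1] = working[79] = wwnn[1];

	for (t = 0; t < 7; t++)
		working[t] ^= challenge[t];	/* as lpfc_challenge_key() does */

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, working);
}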
2881
James Smarte59058c2008-08-24 21:49:00 -04002882/**
James Smart3621a712009-04-06 18:47:14 -04002883 * lpfc_cleanup - Performs vport cleanups before deleting a vport
James Smarte59058c2008-08-24 21:49:00 -04002884 * @vport: pointer to a virtual N_Port data structure.
2885 *
2886 * This routine performs the necessary cleanups before deleting the @vport.
2887 * It invokes the discovery state machine to perform necessary state
2888 * transitions and to release the ndlps associated with the @vport. Note,
2889 * the physical port is treated as @vport 0.
2890 **/
James Smart87af33f2007-10-27 13:37:43 -04002891void
James Smart2e0fef82007-06-17 19:56:36 -05002892lpfc_cleanup(struct lpfc_vport *vport)
dea31012005-04-17 16:05:31 -05002893{
James Smart87af33f2007-10-27 13:37:43 -04002894 struct lpfc_hba *phba = vport->phba;
dea31012005-04-17 16:05:31 -05002895 struct lpfc_nodelist *ndlp, *next_ndlp;
James Smarta8adb832007-10-27 13:37:53 -04002896 int i = 0;
dea31012005-04-17 16:05:31 -05002897
James Smart87af33f2007-10-27 13:37:43 -04002898 if (phba->link_state > LPFC_LINK_DOWN)
2899 lpfc_port_link_failure(vport);
2900
Gaurav Srivastava5e633302021-06-08 10:05:49 +05302901 /* Clean up VMID resources */
2902 if (lpfc_is_vmid_enabled(phba))
2903 lpfc_vmid_vport_cleanup(vport);
2904
James Smart87af33f2007-10-27 13:37:43 -04002905 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
James Smart58da1ff2008-04-07 10:15:56 -04002906 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2907 ndlp->nlp_DID == Fabric_DID) {
2908 /* Just free up ndlp with Fabric_DID for vports */
2909 lpfc_nlp_put(ndlp);
2910 continue;
2911 }
2912
James Smarta70e63e2020-11-15 11:26:38 -08002913 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2914 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
James Smarteff4a012012-01-18 16:25:25 -05002915 lpfc_nlp_put(ndlp);
2916 continue;
2917 }
2918
James Smarte9b11082020-11-15 11:26:33 -08002919 /* Fabric Ports not in UNMAPPED state are cleaned up in the
2920 * DEVICE_RM event.
2921 */
2922 if (ndlp->nlp_type & NLP_FABRIC &&
2923 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
James Smart87af33f2007-10-27 13:37:43 -04002924 lpfc_disc_state_machine(vport, ndlp, NULL,
2925 NLP_EVT_DEVICE_RECOVERY);
James Smarte47c9092008-02-08 18:49:26 -05002926
James Smarte9b11082020-11-15 11:26:33 -08002927 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2928 lpfc_disc_state_machine(vport, ndlp, NULL,
2929 NLP_EVT_DEVICE_RM);
James Smart87af33f2007-10-27 13:37:43 -04002930 }
2931
James Smarta8adb832007-10-27 13:37:53 -04002932 /* At this point, ALL ndlp's should be gone
2933 * because of the previous NLP_EVT_DEVICE_RM.
 2934 * Let's wait for this to happen, if needed.
2935 */
James Smart87af33f2007-10-27 13:37:43 -04002936 while (!list_empty(&vport->fc_nodes)) {
James Smarta8adb832007-10-27 13:37:53 -04002937 if (i++ > 3000) {
Dick Kennedy372c1872020-06-30 14:50:00 -07002938 lpfc_printf_vlog(vport, KERN_ERR,
2939 LOG_TRACE_EVENT,
James Smarta8adb832007-10-27 13:37:53 -04002940 "0233 Nodelist not empty\n");
James Smarte47c9092008-02-08 18:49:26 -05002941 list_for_each_entry_safe(ndlp, next_ndlp,
2942 &vport->fc_nodes, nlp_listp) {
2943 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
James Smarte9b11082020-11-15 11:26:33 -08002944 LOG_TRACE_EVENT,
2945 "0282 did:x%x ndlp:x%px "
2946 "refcnt:%d xflags x%x nflag x%x\n",
2947 ndlp->nlp_DID, (void *)ndlp,
2948 kref_read(&ndlp->kref),
2949 ndlp->fc4_xpt_flags,
2950 ndlp->nlp_flag);
James Smarte47c9092008-02-08 18:49:26 -05002951 }
James Smarta8adb832007-10-27 13:37:53 -04002952 break;
James Smart87af33f2007-10-27 13:37:43 -04002953 }
James Smarta8adb832007-10-27 13:37:53 -04002954
2955 /* Wait for any activity on ndlps to settle */
2956 msleep(10);
James Smart87af33f2007-10-27 13:37:43 -04002957 }
James Smart1151e3e2011-02-16 12:39:35 -05002958 lpfc_cleanup_vports_rrqs(vport, NULL);
dea31012005-04-17 16:05:31 -05002959}
2960
James Smarte59058c2008-08-24 21:49:00 -04002961/**
James Smart3621a712009-04-06 18:47:14 -04002962 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
James Smarte59058c2008-08-24 21:49:00 -04002963 * @vport: pointer to a virtual N_Port data structure.
2964 *
2965 * This routine stops all the timers associated with a @vport. This function
2966 * is invoked before disabling or deleting a @vport. Note that the physical
2967 * port is treated as @vport 0.
2968 **/
James Smart92d7f7b2007-06-17 19:56:38 -05002969void
2970lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea31012005-04-17 16:05:31 -05002971{
James Smart92d7f7b2007-06-17 19:56:38 -05002972 del_timer_sync(&vport->els_tmofunc);
James Smart92494142011-02-16 12:39:44 -05002973 del_timer_sync(&vport->delayed_disc_tmo);
James Smart92d7f7b2007-06-17 19:56:38 -05002974 lpfc_can_disctmo(vport);
2975 return;
dea31012005-04-17 16:05:31 -05002976}
2977
James Smarte59058c2008-08-24 21:49:00 -04002978/**
James Smartecfd03c2010-02-12 14:41:27 -05002979 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2980 * @phba: pointer to lpfc hba data structure.
2981 *
2982 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2983 * caller of this routine should already hold the host lock.
2984 **/
2985void
2986__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2987{
James Smart5ac6b302010-10-22 11:05:36 -04002988 /* Clear pending FCF rediscovery wait flag */
2989 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2990
James Smartecfd03c2010-02-12 14:41:27 -05002991 /* Now, try to stop the timer */
2992 del_timer(&phba->fcf.redisc_wait);
2993}
2994
2995/**
2996 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2997 * @phba: pointer to lpfc hba data structure.
2998 *
2999 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3000 * checks whether the FCF rediscovery wait timer is pending with the host
3001 * lock held before proceeding with disabling the timer and clearing the
 3002 * wait timer pending flag.
3003 **/
3004void
3005lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3006{
3007 spin_lock_irq(&phba->hbalock);
3008 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3009 /* FCF rediscovery timer already fired or stopped */
3010 spin_unlock_irq(&phba->hbalock);
3011 return;
3012 }
3013 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
James Smart5ac6b302010-10-22 11:05:36 -04003014 /* Clear failover in progress flags */
3015 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
James Smartecfd03c2010-02-12 14:41:27 -05003016 spin_unlock_irq(&phba->hbalock);
3017}
3018
3019/**
James Smart3772a992009-05-22 14:50:54 -04003020 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
James Smarte59058c2008-08-24 21:49:00 -04003021 * @phba: pointer to lpfc hba data structure.
3022 *
3023 * This routine stops all the timers associated with a HBA. This function is
3024 * invoked before either putting a HBA offline or unloading the driver.
3025 **/
James Smart3772a992009-05-22 14:50:54 -04003026void
3027lpfc_stop_hba_timers(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05003028{
James Smartcdb42be2019-01-28 11:14:21 -08003029 if (phba->pport)
3030 lpfc_stop_vport_timers(phba->pport);
James Smart32517fc2019-01-28 11:14:33 -08003031 cancel_delayed_work_sync(&phba->eq_delay_work);
Dick Kennedy317aeb82020-06-30 14:49:59 -07003032 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
James Smart2e0fef82007-06-17 19:56:36 -05003033 del_timer_sync(&phba->sli.mbox_tmo);
James Smart92d7f7b2007-06-17 19:56:38 -05003034 del_timer_sync(&phba->fabric_block_timer);
James Smart93996272008-08-24 21:50:30 -04003035 del_timer_sync(&phba->eratt_poll);
James Smart3772a992009-05-22 14:50:54 -04003036 del_timer_sync(&phba->hb_tmofunc);
James Smart1151e3e2011-02-16 12:39:35 -05003037 if (phba->sli_rev == LPFC_SLI_REV4) {
3038 del_timer_sync(&phba->rrq_tmr);
3039 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3040 }
James Smarta22d73b2021-01-04 10:02:38 -08003041 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
James Smart3772a992009-05-22 14:50:54 -04003042
3043 switch (phba->pci_dev_grp) {
3044 case LPFC_PCI_DEV_LP:
3045 /* Stop any LightPulse device specific driver timers */
3046 del_timer_sync(&phba->fcp_poll_timer);
3047 break;
3048 case LPFC_PCI_DEV_OC:
Paul Walmsleycc0e5f12019-07-11 20:52:33 -07003049 /* Stop any OneConnect device specific driver timers */
James Smartecfd03c2010-02-12 14:41:27 -05003050 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
James Smart3772a992009-05-22 14:50:54 -04003051 break;
3052 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07003053 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -04003054 "0297 Invalid device group (x%x)\n",
3055 phba->pci_dev_grp);
3056 break;
3057 }
James Smart2e0fef82007-06-17 19:56:36 -05003058 return;
dea31012005-04-17 16:05:31 -05003059}
3060
James Smarte59058c2008-08-24 21:49:00 -04003061/**
James Smart3621a712009-04-06 18:47:14 -04003062 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
James Smarte59058c2008-08-24 21:49:00 -04003063 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01003064 * @mbx_action: flag for mailbox no wait action.
James Smarte59058c2008-08-24 21:49:00 -04003065 *
3066 * This routine marks a HBA's management interface as blocked. Once the HBA's
 3067 * management interface is marked as blocked, all user space access to
 3068 * the HBA, whether from the sysfs interface or the libdfc interface, is
 3069 * blocked. The HBA is set to block the management interface when the
3070 * driver prepares the HBA interface for online or offline.
3071 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01003072static void
James Smart618a5232012-06-12 13:54:36 -04003073lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
Adrian Bunka6ababd2007-11-05 18:07:33 +01003074{
3075 unsigned long iflag;
James Smart6e7288d2010-06-07 15:23:35 -04003076 uint8_t actcmd = MBX_HEARTBEAT;
3077 unsigned long timeout;
3078
Adrian Bunka6ababd2007-11-05 18:07:33 +01003079 spin_lock_irqsave(&phba->hbalock, iflag);
3080 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
James Smart618a5232012-06-12 13:54:36 -04003081 spin_unlock_irqrestore(&phba->hbalock, iflag);
3082 if (mbx_action == LPFC_MBX_NO_WAIT)
3083 return;
3084 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3085 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta183a152011-10-10 21:32:43 -04003086 if (phba->sli.mbox_active) {
James Smart6e7288d2010-06-07 15:23:35 -04003087 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
James Smarta183a152011-10-10 21:32:43 -04003088 /* Determine how long we might wait for the active mailbox
3089 * command to be gracefully completed by firmware.
3090 */
3091 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3092 phba->sli.mbox_active) * 1000) + jiffies;
3093 }
Adrian Bunka6ababd2007-11-05 18:07:33 +01003094 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta183a152011-10-10 21:32:43 -04003095
James Smart6e7288d2010-06-07 15:23:35 -04003096 /* Wait for the outstanding mailbox command to complete */
3097 while (phba->sli.mbox_active) {
3098 /* Check active mailbox complete status every 2ms */
3099 msleep(2);
3100 if (time_after(jiffies, timeout)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07003101 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3102 "2813 Mgmt IO is Blocked %x "
3103 "- mbox cmd %x still active\n",
3104 phba->sli.sli_flag, actcmd);
James Smart6e7288d2010-06-07 15:23:35 -04003105 break;
3106 }
3107 }
Adrian Bunka6ababd2007-11-05 18:07:33 +01003108}
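
/*
 * Usage note: lpfc_block_mgmt_io() is one half of a bracket around HBA
 * state transitions -- management access is blocked, the transition is
 * performed, then lpfc_unblock_mgmt_io() (defined further below) reopens
 * access, as the lpfc_online() path in this file does.
 */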
3109
James Smarte59058c2008-08-24 21:49:00 -04003110/**
James Smart6b5151f2012-01-18 16:24:06 -05003111 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3112 * @phba: pointer to lpfc hba data structure.
3113 *
3114 * Allocate RPIs for all active remote nodes. This is needed whenever
3115 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 3116 * is to fix up the temporary RPI assignments.
3117 **/
3118void
3119lpfc_sli4_node_prep(struct lpfc_hba *phba)
3120{
3121 struct lpfc_nodelist *ndlp, *next_ndlp;
3122 struct lpfc_vport **vports;
James Smart9d3d3402017-04-21 16:05:00 -07003123 int i, rpi;
James Smart6b5151f2012-01-18 16:24:06 -05003124
3125 if (phba->sli_rev != LPFC_SLI_REV4)
3126 return;
3127
3128 vports = lpfc_create_vport_work_array(phba);
James Smart9d3d3402017-04-21 16:05:00 -07003129 if (vports == NULL)
3130 return;
James Smart6b5151f2012-01-18 16:24:06 -05003131
James Smart9d3d3402017-04-21 16:05:00 -07003132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3133 if (vports[i]->load_flag & FC_UNLOADING)
3134 continue;
3135
3136 list_for_each_entry_safe(ndlp, next_ndlp,
3137 &vports[i]->fc_nodes,
3138 nlp_listp) {
James Smart9d3d3402017-04-21 16:05:00 -07003139 rpi = lpfc_sli4_alloc_rpi(phba);
3140 if (rpi == LPFC_RPI_ALLOC_ERROR) {
James Smart307e3382020-11-15 11:26:30 -08003141 /* TODO print log? */
James Smart9d3d3402017-04-21 16:05:00 -07003142 continue;
James Smart6b5151f2012-01-18 16:24:06 -05003143 }
James Smart9d3d3402017-04-21 16:05:00 -07003144 ndlp->nlp_rpi = rpi;
James Smart0f154222019-09-21 20:58:52 -07003145 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3146 LOG_NODE | LOG_DISCOVERY,
3147 "0009 Assign RPI x%x to ndlp x%px "
James Smart307e3382020-11-15 11:26:30 -08003148 "DID:x%06x flg:x%x\n",
James Smart0f154222019-09-21 20:58:52 -07003149 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
James Smart307e3382020-11-15 11:26:30 -08003150 ndlp->nlp_flag);
James Smart6b5151f2012-01-18 16:24:06 -05003151 }
3152 }
3153 lpfc_destroy_vport_work_array(phba, vports);
3154}
3155
3156/**
James Smartc4908502019-01-28 11:14:28 -08003157 * lpfc_create_expedite_pool - create expedite pool
3158 * @phba: pointer to lpfc hba data structure.
3159 *
3160 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 3161 * to the expedite pool and marks them as expedite.
3162 **/
Bart Van Assche3999df72019-03-28 11:06:16 -07003163static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
James Smartc4908502019-01-28 11:14:28 -08003164{
3165 struct lpfc_sli4_hdw_queue *qp;
3166 struct lpfc_io_buf *lpfc_ncmd;
3167 struct lpfc_io_buf *lpfc_ncmd_next;
3168 struct lpfc_epd_pool *epd_pool;
3169 unsigned long iflag;
3170
3171 epd_pool = &phba->epd_pool;
3172 qp = &phba->sli4_hba.hdwq[0];
3173
3174 spin_lock_init(&epd_pool->lock);
3175 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3176 spin_lock(&epd_pool->lock);
3177 INIT_LIST_HEAD(&epd_pool->list);
3178 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3179 &qp->lpfc_io_buf_list_put, list) {
3180 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3181 lpfc_ncmd->expedite = true;
3182 qp->put_io_bufs--;
3183 epd_pool->count++;
3184 if (epd_pool->count >= XRI_BATCH)
3185 break;
3186 }
3187 spin_unlock(&epd_pool->lock);
3188 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3189}
3190
3191/**
3192 * lpfc_destroy_expedite_pool - destroy expedite pool
3193 * @phba: pointer to lpfc hba data structure.
3194 *
 3195 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 3196 * of HWQ 0 and clears the expedite mark.
3197 **/
Bart Van Assche3999df72019-03-28 11:06:16 -07003198static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
James Smartc4908502019-01-28 11:14:28 -08003199{
3200 struct lpfc_sli4_hdw_queue *qp;
3201 struct lpfc_io_buf *lpfc_ncmd;
3202 struct lpfc_io_buf *lpfc_ncmd_next;
3203 struct lpfc_epd_pool *epd_pool;
3204 unsigned long iflag;
3205
3206 epd_pool = &phba->epd_pool;
3207 qp = &phba->sli4_hba.hdwq[0];
3208
3209 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3210 spin_lock(&epd_pool->lock);
3211 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3212 &epd_pool->list, list) {
3213 list_move_tail(&lpfc_ncmd->list,
3214 &qp->lpfc_io_buf_list_put);
 3215 lpfc_ncmd->expedite = false;
3216 qp->put_io_bufs++;
3217 epd_pool->count--;
3218 }
3219 spin_unlock(&epd_pool->lock);
3220 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3221}
3222
3223/**
3224 * lpfc_create_multixri_pools - create multi-XRI pools
3225 * @phba: pointer to lpfc hba data structure.
3226 *
 3227 * This routine initializes the public and private pools per HWQ. Then it
 3228 * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and low
 3229 * watermarks are also initialized.
3230 **/
3231void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3232{
3233 u32 i, j;
3234 u32 hwq_count;
3235 u32 count_per_hwq;
3236 struct lpfc_io_buf *lpfc_ncmd;
3237 struct lpfc_io_buf *lpfc_ncmd_next;
3238 unsigned long iflag;
3239 struct lpfc_sli4_hdw_queue *qp;
3240 struct lpfc_multixri_pool *multixri_pool;
3241 struct lpfc_pbl_pool *pbl_pool;
3242 struct lpfc_pvt_pool *pvt_pool;
3243
3244 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3245 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3246 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3247 phba->sli4_hba.io_xri_cnt);
3248
3249 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3250 lpfc_create_expedite_pool(phba);
3251
3252 hwq_count = phba->cfg_hdw_queue;
3253 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3254
3255 for (i = 0; i < hwq_count; i++) {
3256 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3257
3258 if (!multixri_pool) {
3259 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3260 "1238 Failed to allocate memory for "
3261 "multixri_pool\n");
3262
3263 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3264 lpfc_destroy_expedite_pool(phba);
3265
3266 j = 0;
3267 while (j < i) {
3268 qp = &phba->sli4_hba.hdwq[j];
3269 kfree(qp->p_multixri_pool);
3270 j++;
3271 }
3272 phba->cfg_xri_rebalancing = 0;
3273 return;
3274 }
3275
3276 qp = &phba->sli4_hba.hdwq[i];
3277 qp->p_multixri_pool = multixri_pool;
3278
3279 multixri_pool->xri_limit = count_per_hwq;
3280 multixri_pool->rrb_next_hwqid = i;
3281
3282 /* Deal with public free xri pool */
3283 pbl_pool = &multixri_pool->pbl_pool;
3284 spin_lock_init(&pbl_pool->lock);
3285 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3286 spin_lock(&pbl_pool->lock);
3287 INIT_LIST_HEAD(&pbl_pool->list);
3288 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3289 &qp->lpfc_io_buf_list_put, list) {
3290 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3291 qp->put_io_bufs--;
3292 pbl_pool->count++;
3293 }
3294 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3295 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3296 pbl_pool->count, i);
3297 spin_unlock(&pbl_pool->lock);
3298 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3299
3300 /* Deal with private free xri pool */
3301 pvt_pool = &multixri_pool->pvt_pool;
3302 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3303 pvt_pool->low_watermark = XRI_BATCH;
3304 spin_lock_init(&pvt_pool->lock);
3305 spin_lock_irqsave(&pvt_pool->lock, iflag);
3306 INIT_LIST_HEAD(&pvt_pool->list);
3307 pvt_pool->count = 0;
3308 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3309 }
3310}
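
/*
 * Worked example of the sizing above (numbers are illustrative only):
 * with 4096 common XRIs spread over 16 hardware queues, each pool gets an
 * xri_limit of 256, a private-pool high watermark of 128 (xri_limit / 2)
 * and a low watermark of XRI_BATCH.
 */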
3311
3312/**
3313 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3314 * @phba: pointer to lpfc hba data structure.
3315 *
3316 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3317 **/
Bart Van Assche3999df72019-03-28 11:06:16 -07003318static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
James Smartc4908502019-01-28 11:14:28 -08003319{
3320 u32 i;
3321 u32 hwq_count;
3322 struct lpfc_io_buf *lpfc_ncmd;
3323 struct lpfc_io_buf *lpfc_ncmd_next;
3324 unsigned long iflag;
3325 struct lpfc_sli4_hdw_queue *qp;
3326 struct lpfc_multixri_pool *multixri_pool;
3327 struct lpfc_pbl_pool *pbl_pool;
3328 struct lpfc_pvt_pool *pvt_pool;
3329
3330 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3331 lpfc_destroy_expedite_pool(phba);
3332
James Smartc00f62e2019-08-14 16:57:11 -07003333 if (!(phba->pport->load_flag & FC_UNLOADING))
3334 lpfc_sli_flush_io_rings(phba);
James Smartc66a9192019-03-12 16:30:19 -07003335
James Smartc4908502019-01-28 11:14:28 -08003336 hwq_count = phba->cfg_hdw_queue;
3337
3338 for (i = 0; i < hwq_count; i++) {
3339 qp = &phba->sli4_hba.hdwq[i];
3340 multixri_pool = qp->p_multixri_pool;
3341 if (!multixri_pool)
3342 continue;
3343
3344 qp->p_multixri_pool = NULL;
3345
3346 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3347
3348 /* Deal with public free xri pool */
3349 pbl_pool = &multixri_pool->pbl_pool;
3350 spin_lock(&pbl_pool->lock);
3351
3352 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3353 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3354 pbl_pool->count, i);
3355
3356 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3357 &pbl_pool->list, list) {
3358 list_move_tail(&lpfc_ncmd->list,
3359 &qp->lpfc_io_buf_list_put);
3360 qp->put_io_bufs++;
3361 pbl_pool->count--;
3362 }
3363
3364 INIT_LIST_HEAD(&pbl_pool->list);
3365 pbl_pool->count = 0;
3366
3367 spin_unlock(&pbl_pool->lock);
3368
3369 /* Deal with private free xri pool */
3370 pvt_pool = &multixri_pool->pvt_pool;
3371 spin_lock(&pvt_pool->lock);
3372
3373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3374 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3375 pvt_pool->count, i);
3376
3377 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3378 &pvt_pool->list, list) {
3379 list_move_tail(&lpfc_ncmd->list,
3380 &qp->lpfc_io_buf_list_put);
3381 qp->put_io_bufs++;
3382 pvt_pool->count--;
3383 }
3384
3385 INIT_LIST_HEAD(&pvt_pool->list);
3386 pvt_pool->count = 0;
3387
3388 spin_unlock(&pvt_pool->lock);
3389 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3390
3391 kfree(multixri_pool);
3392 }
3393}
3394
3395/**
James Smart3621a712009-04-06 18:47:14 -04003396 * lpfc_online - Initialize and bring a HBA online
James Smarte59058c2008-08-24 21:49:00 -04003397 * @phba: pointer to lpfc hba data structure.
3398 *
3399 * This routine initializes the HBA and brings a HBA online. During this
3400 * process, the management interface is blocked to prevent user space access
3401 * to the HBA interfering with the driver initialization.
3402 *
3403 * Return codes
3404 * 0 - successful
3405 * 1 - failed
3406 **/
dea31012005-04-17 16:05:31 -05003407int
James Smart2e0fef82007-06-17 19:56:36 -05003408lpfc_online(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05003409{
Julia Lawall372bd282008-12-16 16:15:08 +01003410 struct lpfc_vport *vport;
James Smart549e55c2007-08-02 11:09:51 -04003411 struct lpfc_vport **vports;
Dick Kennedya145fda2017-08-23 16:55:44 -07003412 int i, error = 0;
James Smart16a3a202013-04-17 20:14:38 -04003413 bool vpis_cleared = false;
James Smart2e0fef82007-06-17 19:56:36 -05003414
dea31012005-04-17 16:05:31 -05003415 if (!phba)
3416 return 0;
Julia Lawall372bd282008-12-16 16:15:08 +01003417 vport = phba->pport;
dea31012005-04-17 16:05:31 -05003418
James Smart2e0fef82007-06-17 19:56:36 -05003419 if (!(vport->fc_flag & FC_OFFLINE_MODE))
dea31012005-04-17 16:05:31 -05003420 return 0;
3421
James Smarted957682007-06-17 19:56:37 -05003422 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04003423 "0458 Bring Adapter online\n");
dea31012005-04-17 16:05:31 -05003424
James Smart618a5232012-06-12 13:54:36 -04003425 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
dea31012005-04-17 16:05:31 -05003426
James Smartda0436e2009-05-22 14:51:39 -04003427 if (phba->sli_rev == LPFC_SLI_REV4) {
3428 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3429 lpfc_unblock_mgmt_io(phba);
3430 return 1;
3431 }
James Smart16a3a202013-04-17 20:14:38 -04003432 spin_lock_irq(&phba->hbalock);
3433 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3434 vpis_cleared = true;
3435 spin_unlock_irq(&phba->hbalock);
Dick Kennedya145fda2017-08-23 16:55:44 -07003436
3437 /* Reestablish the local initiator port.
3438 * The offline process destroyed the previous lport.
3439 */
3440 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3441 !phba->nvmet_support) {
3442 error = lpfc_nvme_create_localport(phba->pport);
3443 if (error)
Dick Kennedy372c1872020-06-30 14:50:00 -07003444 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Dick Kennedya145fda2017-08-23 16:55:44 -07003445 "6132 NVME restore reg failed "
3446 "on nvmei error x%x\n", error);
3447 }
James Smartda0436e2009-05-22 14:51:39 -04003448 } else {
James Smart895427b2017-02-12 13:52:30 -08003449 lpfc_sli_queue_init(phba);
James Smartda0436e2009-05-22 14:51:39 -04003450 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3451 lpfc_unblock_mgmt_io(phba);
3452 return 1;
3453 }
James Smart46fa3112007-04-25 09:51:45 -04003454 }
dea31012005-04-17 16:05:31 -05003455
James Smart549e55c2007-08-02 11:09:51 -04003456 vports = lpfc_create_vport_work_array(phba);
Arnd Bergmannaeb66412016-03-14 15:29:44 +01003457 if (vports != NULL) {
James Smartda0436e2009-05-22 14:51:39 -04003458 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
James Smart549e55c2007-08-02 11:09:51 -04003459 struct Scsi_Host *shost;
3460 shost = lpfc_shost_from_vport(vports[i]);
3461 spin_lock_irq(shost->host_lock);
3462 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3463 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3464 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
James Smart16a3a202013-04-17 20:14:38 -04003465 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart1c6834a2009-07-19 10:01:26 -04003466 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
James Smart16a3a202013-04-17 20:14:38 -04003467 if ((vpis_cleared) &&
3468 (vports[i]->port_type !=
3469 LPFC_PHYSICAL_PORT))
3470 vports[i]->vpi = 0;
3471 }
James Smart549e55c2007-08-02 11:09:51 -04003472 spin_unlock_irq(shost->host_lock);
3473 }
Arnd Bergmannaeb66412016-03-14 15:29:44 +01003474 }
3475 lpfc_destroy_vport_work_array(phba, vports);
dea31012005-04-17 16:05:31 -05003476
James Smartc4908502019-01-28 11:14:28 -08003477 if (phba->cfg_xri_rebalancing)
3478 lpfc_create_multixri_pools(phba);
3479
James Smart93a4d6f2019-11-04 16:57:05 -08003480 lpfc_cpuhp_add(phba);
3481
James Smart46fa3112007-04-25 09:51:45 -04003482 lpfc_unblock_mgmt_io(phba);
dea31012005-04-17 16:05:31 -05003483 return 0;
3484}
3485
James Smarte59058c2008-08-24 21:49:00 -04003486/**
James Smart3621a712009-04-06 18:47:14 -04003487 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
James Smarte59058c2008-08-24 21:49:00 -04003488 * @phba: pointer to lpfc hba data structure.
3489 *
3490 * This routine marks a HBA's management interface as not blocked. Once the
3491 * HBA's management interface is marked as not blocked, all the user space
3492 * access to the HBA, whether they are from sysfs interface or libdfc
3493 * interface will be allowed. The HBA is set to block the management interface
3494 * when the driver prepares the HBA interface for online or offline and then
3495 * set to unblock the management interface afterwards.
3496 **/
James Smart46fa3112007-04-25 09:51:45 -04003497void
James Smart46fa3112007-04-25 09:51:45 -04003498lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3499{
3500 unsigned long iflag;
3501
James Smart2e0fef82007-06-17 19:56:36 -05003502 spin_lock_irqsave(&phba->hbalock, iflag);
3503 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3504 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart46fa3112007-04-25 09:51:45 -04003505}
3506
James Smarte59058c2008-08-24 21:49:00 -04003507/**
James Smart3621a712009-04-06 18:47:14 -04003508 * lpfc_offline_prep - Prepare a HBA to be brought offline
James Smarte59058c2008-08-24 21:49:00 -04003509 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01003510 * @mbx_action: flag for mailbox shutdown action.
James Smarte59058c2008-08-24 21:49:00 -04003511 *
 3512 * This routine is invoked to prepare a HBA to be brought offline. It issues
 3513 * an unreg_login to all the nodes on all vports and flushes the mailbox
3514 * queue to make it ready to be brought offline.
3515 **/
James Smart46fa3112007-04-25 09:51:45 -04003516void
James Smart618a5232012-06-12 13:54:36 -04003517lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
James Smart46fa3112007-04-25 09:51:45 -04003518{
James Smart2e0fef82007-06-17 19:56:36 -05003519 struct lpfc_vport *vport = phba->pport;
James Smart46fa3112007-04-25 09:51:45 -04003520 struct lpfc_nodelist *ndlp, *next_ndlp;
James Smart87af33f2007-10-27 13:37:43 -04003521 struct lpfc_vport **vports;
James Smart72100cc2010-02-12 14:43:01 -05003522 struct Scsi_Host *shost;
James Smart87af33f2007-10-27 13:37:43 -04003523 int i;
dea31012005-04-17 16:05:31 -05003524
James Smart2e0fef82007-06-17 19:56:36 -05003525 if (vport->fc_flag & FC_OFFLINE_MODE)
James Smart46fa3112007-04-25 09:51:45 -04003526 return;
dea31012005-04-17 16:05:31 -05003527
James Smart618a5232012-06-12 13:54:36 -04003528 lpfc_block_mgmt_io(phba, mbx_action);
dea31012005-04-17 16:05:31 -05003529
3530 lpfc_linkdown(phba);
3531
James Smart87af33f2007-10-27 13:37:43 -04003532 /* Issue an unreg_login to all nodes on all vports */
3533 vports = lpfc_create_vport_work_array(phba);
3534 if (vports != NULL) {
James Smartda0436e2009-05-22 14:51:39 -04003535 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
James Smarta8adb832007-10-27 13:37:53 -04003536 if (vports[i]->load_flag & FC_UNLOADING)
3537 continue;
James Smart72100cc2010-02-12 14:43:01 -05003538 shost = lpfc_shost_from_vport(vports[i]);
3539 spin_lock_irq(shost->host_lock);
James Smartc8685952009-11-18 15:39:16 -05003540 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
James Smart695a8142010-01-26 23:08:03 -05003541 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3542 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
James Smart72100cc2010-02-12 14:43:01 -05003543 spin_unlock_irq(shost->host_lock);
James Smart695a8142010-01-26 23:08:03 -05003544
James Smart87af33f2007-10-27 13:37:43 -04003545 shost = lpfc_shost_from_vport(vports[i]);
3546 list_for_each_entry_safe(ndlp, next_ndlp,
3547 &vports[i]->fc_nodes,
3548 nlp_listp) {
James Smart0f154222019-09-21 20:58:52 -07003549
James Smartc6adba12020-11-15 11:26:34 -08003550 spin_lock_irq(&ndlp->lock);
James Smart87af33f2007-10-27 13:37:43 -04003551 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
James Smartc6adba12020-11-15 11:26:34 -08003552 spin_unlock_irq(&ndlp->lock);
James Smartaffbe242021-07-07 11:43:42 -07003553
3554 lpfc_unreg_rpi(vports[i], ndlp);
James Smart6b5151f2012-01-18 16:24:06 -05003555 /*
3556 * Whenever an SLI4 port goes offline, free the
James Smart401ee0c2012-03-01 22:35:34 -05003557 * RPI. Get a new RPI when the adapter port
3558 * comes back online.
James Smart6b5151f2012-01-18 16:24:06 -05003559 */
James Smartbe6bb942015-04-07 15:07:22 -04003560 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smarte9b11082020-11-15 11:26:33 -08003561 lpfc_printf_vlog(vports[i], KERN_INFO,
James Smart0f154222019-09-21 20:58:52 -07003562 LOG_NODE | LOG_DISCOVERY,
3563 "0011 Free RPI x%x on "
James Smartf1156122021-04-11 18:31:24 -07003564 "ndlp: x%px did x%x\n",
James Smart0f154222019-09-21 20:58:52 -07003565 ndlp->nlp_rpi, ndlp,
James Smart307e3382020-11-15 11:26:30 -08003566 ndlp->nlp_DID);
James Smart6b5151f2012-01-18 16:24:06 -05003567 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
James Smart0f154222019-09-21 20:58:52 -07003568 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
James Smartbe6bb942015-04-07 15:07:22 -04003569 }
James Smart307e3382020-11-15 11:26:30 -08003570
3571 if (ndlp->nlp_type & NLP_FABRIC) {
3572 lpfc_disc_state_machine(vports[i], ndlp,
3573 NULL, NLP_EVT_DEVICE_RECOVERY);
James Smarte9b11082020-11-15 11:26:33 -08003574
 3575 /* Don't remove the node unless it
 3576 * has been unregistered from the
 3577 * transport. If it is still registered,
 3578 * let dev_loss take care of the node.
3579 */
3580 if (!(ndlp->fc4_xpt_flags &
3581 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3582 lpfc_disc_state_machine
3583 (vports[i], ndlp,
3584 NULL,
3585 NLP_EVT_DEVICE_RM);
James Smart307e3382020-11-15 11:26:30 -08003586 }
James Smart87af33f2007-10-27 13:37:43 -04003587 }
3588 }
3589 }
James Smart09372822008-01-11 01:52:54 -05003590 lpfc_destroy_vport_work_array(phba, vports);
dea31012005-04-17 16:05:31 -05003591
James Smart618a5232012-06-12 13:54:36 -04003592 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
Dick Kennedyf485c182017-09-29 17:34:34 -07003593
3594 if (phba->wq)
3595 flush_workqueue(phba->wq);
James Smart46fa3112007-04-25 09:51:45 -04003596}
3597
James Smarte59058c2008-08-24 21:49:00 -04003598/**
James Smart3621a712009-04-06 18:47:14 -04003599 * lpfc_offline - Bring a HBA offline
James Smarte59058c2008-08-24 21:49:00 -04003600 * @phba: pointer to lpfc hba data structure.
3601 *
3602 * This routine actually brings a HBA offline. It stops all the timers
3603 * associated with the HBA, brings down the SLI layer, and eventually
3604 * marks the HBA as in offline state for the upper layer protocol.
3605 **/
James Smart46fa3112007-04-25 09:51:45 -04003606void
James Smart2e0fef82007-06-17 19:56:36 -05003607lpfc_offline(struct lpfc_hba *phba)
James Smart46fa3112007-04-25 09:51:45 -04003608{
James Smart549e55c2007-08-02 11:09:51 -04003609 struct Scsi_Host *shost;
3610 struct lpfc_vport **vports;
3611 int i;
James Smart46fa3112007-04-25 09:51:45 -04003612
James Smart549e55c2007-08-02 11:09:51 -04003613 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
James Smart46fa3112007-04-25 09:51:45 -04003614 return;
James Smart688a8862006-07-06 15:49:56 -04003615
James Smartda0436e2009-05-22 14:51:39 -04003616 /* stop port and all timers associated with this hba */
3617 lpfc_stop_port(phba);
Dick Kennedy4b40d022017-08-23 16:55:38 -07003618
3619 /* Tear down the local and target port registrations. The
 3620 * nvme transports need to clean up.
3621 */
3622 lpfc_nvmet_destroy_targetport(phba);
3623 lpfc_nvme_destroy_localport(phba->pport);
3624
James Smart51ef4c22007-08-02 11:10:31 -04003625 vports = lpfc_create_vport_work_array(phba);
3626 if (vports != NULL)
James Smartda0436e2009-05-22 14:51:39 -04003627 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
James Smart51ef4c22007-08-02 11:10:31 -04003628 lpfc_stop_vport_timers(vports[i]);
James Smart09372822008-01-11 01:52:54 -05003629 lpfc_destroy_vport_work_array(phba, vports);
James Smart92d7f7b2007-06-17 19:56:38 -05003630 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04003631 "0460 Bring Adapter offline\n");
dea31012005-04-17 16:05:31 -05003632 /* Bring down the SLI Layer and clean up. The HBA is offline
3633 now. */
3634 lpfc_sli_hba_down(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05003635 spin_lock_irq(&phba->hbalock);
James Smart7054a602007-04-25 09:52:34 -04003636 phba->work_ha = 0;
James Smart92d7f7b2007-06-17 19:56:38 -05003637 spin_unlock_irq(&phba->hbalock);
James Smart549e55c2007-08-02 11:09:51 -04003638 vports = lpfc_create_vport_work_array(phba);
3639 if (vports != NULL)
James Smartda0436e2009-05-22 14:51:39 -04003640 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
James Smart549e55c2007-08-02 11:09:51 -04003641 shost = lpfc_shost_from_vport(vports[i]);
James Smart549e55c2007-08-02 11:09:51 -04003642 spin_lock_irq(shost->host_lock);
3643 vports[i]->work_port_events = 0;
3644 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3645 spin_unlock_irq(shost->host_lock);
3646 }
James Smart09372822008-01-11 01:52:54 -05003647 lpfc_destroy_vport_work_array(phba, vports);
James Smartf0871ab2021-01-04 10:02:32 -08003648 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3649 * in hba_unset
3650 */
3651 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3652 __lpfc_cpuhp_remove(phba);
James Smartc4908502019-01-28 11:14:28 -08003653
3654 if (phba->cfg_xri_rebalancing)
3655 lpfc_destroy_multixri_pools(phba);
dea31012005-04-17 16:05:31 -05003656}
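
/*
 * Illustrative call sequence (a sketch only, modeled on the driver's reset
 * path; LPFC_MBX_WAIT shown for illustration): prep and offline are typically
 * paired before the port is restarted, roughly:
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);
 *	lpfc_sli_brdrestart(phba);
 *	lpfc_online(phba);
 */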
3657
James Smarte59058c2008-08-24 21:49:00 -04003658/**
James Smart3621a712009-04-06 18:47:14 -04003659 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
James Smarte59058c2008-08-24 21:49:00 -04003660 * @phba: pointer to lpfc hba data structure.
3661 *
3662 * This routine is to free all the SCSI buffers and IOCBs from the driver
3663 * list back to kernel. It is called from lpfc_pci_remove_one to free
3664 * the internal resources before the device is removed from the system.
James Smarte59058c2008-08-24 21:49:00 -04003665 **/
James Smart8a9d2e82012-05-09 21:16:12 -04003666static void
James Smart2e0fef82007-06-17 19:56:36 -05003667lpfc_scsi_free(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05003668{
James Smartc4908502019-01-28 11:14:28 -08003669 struct lpfc_io_buf *sb, *sb_next;
dea31012005-04-17 16:05:31 -05003670
James Smart895427b2017-02-12 13:52:30 -08003671 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3672 return;
3673
James Smart2e0fef82007-06-17 19:56:36 -05003674 spin_lock_irq(&phba->hbalock);
James Smarta40fc5f2013-04-17 20:17:40 -04003675
dea31012005-04-17 16:05:31 -05003676 /* Release all the lpfc_scsi_bufs maintained by this host. */
James Smarta40fc5f2013-04-17 20:17:40 -04003677
3678 spin_lock(&phba->scsi_buf_list_put_lock);
3679 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3680 list) {
dea31012005-04-17 16:05:31 -05003681 list_del(&sb->list);
Romain Perier771db5c2017-07-06 10:13:05 +02003682 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
James Smart92d7f7b2007-06-17 19:56:38 -05003683 sb->dma_handle);
dea31012005-04-17 16:05:31 -05003684 kfree(sb);
3685 phba->total_scsi_bufs--;
3686 }
James Smarta40fc5f2013-04-17 20:17:40 -04003687 spin_unlock(&phba->scsi_buf_list_put_lock);
3688
3689 spin_lock(&phba->scsi_buf_list_get_lock);
3690 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3691 list) {
3692 list_del(&sb->list);
Romain Perier771db5c2017-07-06 10:13:05 +02003693 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
James Smarta40fc5f2013-04-17 20:17:40 -04003694 sb->dma_handle);
3695 kfree(sb);
3696 phba->total_scsi_bufs--;
3697 }
3698 spin_unlock(&phba->scsi_buf_list_get_lock);
James Smart2e0fef82007-06-17 19:56:36 -05003699 spin_unlock_irq(&phba->hbalock);
James Smart8a9d2e82012-05-09 21:16:12 -04003700}
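
/*
 * Informational sketch (not driver code): the SCSI buffers are kept on two
 * lists, a "get" list that allocations are served from and a "put" list that
 * completions free into, so the two paths do not fight over a single lock.
 * When the get list runs dry, the allocator swaps the put list in under both
 * locks, roughly:
 *
 *	spin_lock(&phba->scsi_buf_list_get_lock);
 *	if (list_empty(&phba->lpfc_scsi_buf_list_get)) {
 *		spin_lock(&phba->scsi_buf_list_put_lock);
 *		list_splice_init(&phba->lpfc_scsi_buf_list_put,
 *				 &phba->lpfc_scsi_buf_list_get);
 *		spin_unlock(&phba->scsi_buf_list_put_lock);
 *	}
 *	spin_unlock(&phba->scsi_buf_list_get_lock);
 */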
James Smart0794d602019-01-28 11:14:19 -08003701
James Smart8a9d2e82012-05-09 21:16:12 -04003702/**
James Smart5e5b5112019-01-28 11:14:22 -08003703 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
James Smart895427b2017-02-12 13:52:30 -08003704 * @phba: pointer to lpfc hba data structure.
3705 *
James Smart0794d602019-01-28 11:14:19 -08003706 * This routine frees all the IO buffers and IOCBs from the driver
James Smart895427b2017-02-12 13:52:30 -08003707 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3708 * the internal resources before the device is removed from the system.
3709 **/
James Smartc4908502019-01-28 11:14:28 -08003710void
James Smart5e5b5112019-01-28 11:14:22 -08003711lpfc_io_free(struct lpfc_hba *phba)
James Smart895427b2017-02-12 13:52:30 -08003712{
James Smartc4908502019-01-28 11:14:28 -08003713 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
James Smart5e5b5112019-01-28 11:14:22 -08003714 struct lpfc_sli4_hdw_queue *qp;
3715 int idx;
James Smart895427b2017-02-12 13:52:30 -08003716
James Smart5e5b5112019-01-28 11:14:22 -08003717 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3718 qp = &phba->sli4_hba.hdwq[idx];
 3719 /* Release all the IO buffers maintained by this hardware queue. */
3720 spin_lock(&qp->io_buf_list_put_lock);
3721 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3722 &qp->lpfc_io_buf_list_put,
3723 list) {
3724 list_del(&lpfc_ncmd->list);
3725 qp->put_io_bufs--;
3726 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3727 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
James Smartd79c9e92019-08-14 16:57:09 -07003728 if (phba->cfg_xpsgl && !phba->nvmet_support)
3729 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3730 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
James Smart5e5b5112019-01-28 11:14:22 -08003731 kfree(lpfc_ncmd);
3732 qp->total_io_bufs--;
3733 }
3734 spin_unlock(&qp->io_buf_list_put_lock);
James Smart895427b2017-02-12 13:52:30 -08003735
James Smart5e5b5112019-01-28 11:14:22 -08003736 spin_lock(&qp->io_buf_list_get_lock);
3737 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3738 &qp->lpfc_io_buf_list_get,
3739 list) {
3740 list_del(&lpfc_ncmd->list);
3741 qp->get_io_bufs--;
3742 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3743 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
James Smartd79c9e92019-08-14 16:57:09 -07003744 if (phba->cfg_xpsgl && !phba->nvmet_support)
3745 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3746 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
James Smart5e5b5112019-01-28 11:14:22 -08003747 kfree(lpfc_ncmd);
3748 qp->total_io_bufs--;
3749 }
3750 spin_unlock(&qp->io_buf_list_get_lock);
James Smart895427b2017-02-12 13:52:30 -08003751 }
James Smart895427b2017-02-12 13:52:30 -08003752}
James Smart0794d602019-01-28 11:14:19 -08003753
James Smart895427b2017-02-12 13:52:30 -08003754/**
3755 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
James Smart8a9d2e82012-05-09 21:16:12 -04003756 * @phba: pointer to lpfc hba data structure.
3757 *
 3758 * This routine first calculates the required and currently allocated els
 3759 * xri-sgl list sizes, and then goes through all sgls to update the physical
 3760 * XRIs assigned due to port function reset. During port initialization, the
 3761 * currently allocated els sgl list is empty.
3762 *
3763 * Return codes
3764 * 0 - successful (for now, it always returns 0)
3765 **/
3766int
James Smart895427b2017-02-12 13:52:30 -08003767lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
James Smart8a9d2e82012-05-09 21:16:12 -04003768{
3769 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
James Smart895427b2017-02-12 13:52:30 -08003770 uint16_t i, lxri, xri_cnt, els_xri_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04003771 LIST_HEAD(els_sgl_list);
James Smart8a9d2e82012-05-09 21:16:12 -04003772 int rc;
3773
3774 /*
3775 * update on pci function's els xri-sgl list
3776 */
3777 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
James Smart895427b2017-02-12 13:52:30 -08003778
James Smart8a9d2e82012-05-09 21:16:12 -04003779 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3780 /* els xri-sgl expanded */
3781 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3782 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3783 "3157 ELS xri-sgl count increased from "
3784 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3785 els_xri_cnt);
3786 /* allocate the additional els sgls */
3787 for (i = 0; i < xri_cnt; i++) {
3788 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3789 GFP_KERNEL);
3790 if (sglq_entry == NULL) {
Dick Kennedy372c1872020-06-30 14:50:00 -07003791 lpfc_printf_log(phba, KERN_ERR,
3792 LOG_TRACE_EVENT,
James Smart8a9d2e82012-05-09 21:16:12 -04003793 "2562 Failure to allocate an "
3794 "ELS sgl entry:%d\n", i);
3795 rc = -ENOMEM;
3796 goto out_free_mem;
3797 }
3798 sglq_entry->buff_type = GEN_BUFF_TYPE;
3799 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3800 &sglq_entry->phys);
3801 if (sglq_entry->virt == NULL) {
3802 kfree(sglq_entry);
Dick Kennedy372c1872020-06-30 14:50:00 -07003803 lpfc_printf_log(phba, KERN_ERR,
3804 LOG_TRACE_EVENT,
James Smart8a9d2e82012-05-09 21:16:12 -04003805 "2563 Failure to allocate an "
3806 "ELS mbuf:%d\n", i);
3807 rc = -ENOMEM;
3808 goto out_free_mem;
3809 }
3810 sglq_entry->sgl = sglq_entry->virt;
3811 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3812 sglq_entry->state = SGL_FREED;
3813 list_add_tail(&sglq_entry->list, &els_sgl_list);
3814 }
James Smarta7892412021-04-11 18:31:15 -07003815 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
James Smart895427b2017-02-12 13:52:30 -08003816 list_splice_init(&els_sgl_list,
3817 &phba->sli4_hba.lpfc_els_sgl_list);
James Smarta7892412021-04-11 18:31:15 -07003818 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
James Smart8a9d2e82012-05-09 21:16:12 -04003819 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 3820 /* els xri-sgl shrunk */
3821 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3822 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3823 "3158 ELS xri-sgl count decreased from "
3824 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3825 els_xri_cnt);
James Smarta7892412021-04-11 18:31:15 -07003826 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
James Smart895427b2017-02-12 13:52:30 -08003827 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3828 &els_sgl_list);
James Smart8a9d2e82012-05-09 21:16:12 -04003829 /* release extra els sgls from list */
3830 for (i = 0; i < xri_cnt; i++) {
3831 list_remove_head(&els_sgl_list,
3832 sglq_entry, struct lpfc_sglq, list);
3833 if (sglq_entry) {
James Smart895427b2017-02-12 13:52:30 -08003834 __lpfc_mbuf_free(phba, sglq_entry->virt,
3835 sglq_entry->phys);
James Smart8a9d2e82012-05-09 21:16:12 -04003836 kfree(sglq_entry);
3837 }
3838 }
James Smart895427b2017-02-12 13:52:30 -08003839 list_splice_init(&els_sgl_list,
3840 &phba->sli4_hba.lpfc_els_sgl_list);
James Smarta7892412021-04-11 18:31:15 -07003841 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
James Smart8a9d2e82012-05-09 21:16:12 -04003842 } else
3843 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3844 "3163 ELS xri-sgl count unchanged: %d\n",
3845 els_xri_cnt);
3846 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3847
3848 /* update xris to els sgls on the list */
3849 sglq_entry = NULL;
3850 sglq_entry_next = NULL;
3851 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
James Smart895427b2017-02-12 13:52:30 -08003852 &phba->sli4_hba.lpfc_els_sgl_list, list) {
James Smart8a9d2e82012-05-09 21:16:12 -04003853 lxri = lpfc_sli4_next_xritag(phba);
3854 if (lxri == NO_XRI) {
Dick Kennedy372c1872020-06-30 14:50:00 -07003855 lpfc_printf_log(phba, KERN_ERR,
3856 LOG_TRACE_EVENT,
James Smart8a9d2e82012-05-09 21:16:12 -04003857 "2400 Failed to allocate xri for "
3858 "ELS sgl\n");
3859 rc = -ENOMEM;
3860 goto out_free_mem;
3861 }
3862 sglq_entry->sli4_lxritag = lxri;
3863 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3864 }
James Smart895427b2017-02-12 13:52:30 -08003865 return 0;
3866
3867out_free_mem:
3868 lpfc_free_els_sgl_list(phba);
3869 return rc;
3870}
3871
3872/**
James Smartf358dd02017-02-12 13:52:34 -08003873 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3874 * @phba: pointer to lpfc hba data structure.
3875 *
 3876 * This routine first calculates the sizes of the current els and allocated
 3877 * nvmet xri-sgl lists, and then goes through all sgls to update the physical
 3878 * XRIs assigned due to port function reset. During port initialization, the
 3879 * current els and allocated nvmet sgl lists are empty.
3880 *
3881 * Return codes
3882 * 0 - successful (for now, it always returns 0)
3883 **/
3884int
3885lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3886{
3887 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3888 uint16_t i, lxri, xri_cnt, els_xri_cnt;
James Smart6c621a22017-05-15 15:20:45 -07003889 uint16_t nvmet_xri_cnt;
James Smartf358dd02017-02-12 13:52:34 -08003890 LIST_HEAD(nvmet_sgl_list);
3891 int rc;
3892
3893 /*
3894 * update on pci function's nvmet xri-sgl list
3895 */
3896 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
James Smart61f3d4b2017-05-15 15:20:41 -07003897
James Smart6c621a22017-05-15 15:20:45 -07003898 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3899 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
James Smartf358dd02017-02-12 13:52:34 -08003900 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 3901 /* nvmet xri-sgl expanded */
3902 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3903 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3904 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3905 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3906 /* allocate the additional nvmet sgls */
3907 for (i = 0; i < xri_cnt; i++) {
3908 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3909 GFP_KERNEL);
3910 if (sglq_entry == NULL) {
Dick Kennedy372c1872020-06-30 14:50:00 -07003911 lpfc_printf_log(phba, KERN_ERR,
3912 LOG_TRACE_EVENT,
James Smartf358dd02017-02-12 13:52:34 -08003913 "6303 Failure to allocate an "
3914 "NVMET sgl entry:%d\n", i);
3915 rc = -ENOMEM;
3916 goto out_free_mem;
3917 }
3918 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3919 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3920 &sglq_entry->phys);
3921 if (sglq_entry->virt == NULL) {
3922 kfree(sglq_entry);
Dick Kennedy372c1872020-06-30 14:50:00 -07003923 lpfc_printf_log(phba, KERN_ERR,
3924 LOG_TRACE_EVENT,
James Smartf358dd02017-02-12 13:52:34 -08003925 "6304 Failure to allocate an "
3926 "NVMET buf:%d\n", i);
3927 rc = -ENOMEM;
3928 goto out_free_mem;
3929 }
3930 sglq_entry->sgl = sglq_entry->virt;
3931 memset(sglq_entry->sgl, 0,
3932 phba->cfg_sg_dma_buf_size);
3933 sglq_entry->state = SGL_FREED;
3934 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3935 }
3936 spin_lock_irq(&phba->hbalock);
3937 spin_lock(&phba->sli4_hba.sgl_list_lock);
3938 list_splice_init(&nvmet_sgl_list,
3939 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3940 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3941 spin_unlock_irq(&phba->hbalock);
3942 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3943 /* nvmet xri-sgl shrunk */
3944 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3945 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3946 "6305 NVMET xri-sgl count decreased from "
3947 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3948 nvmet_xri_cnt);
3949 spin_lock_irq(&phba->hbalock);
3950 spin_lock(&phba->sli4_hba.sgl_list_lock);
3951 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3952 &nvmet_sgl_list);
3953 /* release extra nvmet sgls from list */
3954 for (i = 0; i < xri_cnt; i++) {
3955 list_remove_head(&nvmet_sgl_list,
3956 sglq_entry, struct lpfc_sglq, list);
3957 if (sglq_entry) {
3958 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3959 sglq_entry->phys);
3960 kfree(sglq_entry);
3961 }
3962 }
3963 list_splice_init(&nvmet_sgl_list,
3964 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3965 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3966 spin_unlock_irq(&phba->hbalock);
3967 } else
3968 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3969 "6306 NVMET xri-sgl count unchanged: %d\n",
3970 nvmet_xri_cnt);
3971 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3972
3973 /* update xris to nvmet sgls on the list */
3974 sglq_entry = NULL;
3975 sglq_entry_next = NULL;
3976 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3977 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3978 lxri = lpfc_sli4_next_xritag(phba);
3979 if (lxri == NO_XRI) {
Dick Kennedy372c1872020-06-30 14:50:00 -07003980 lpfc_printf_log(phba, KERN_ERR,
3981 LOG_TRACE_EVENT,
James Smartf358dd02017-02-12 13:52:34 -08003982 "6307 Failed to allocate xri for "
3983 "NVMET sgl\n");
3984 rc = -ENOMEM;
3985 goto out_free_mem;
3986 }
3987 sglq_entry->sli4_lxritag = lxri;
3988 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3989 }
3990 return 0;
3991
3992out_free_mem:
3993 lpfc_free_nvmet_sgl_list(phba);
3994 return rc;
3995}
3996
James Smart5e5b5112019-01-28 11:14:22 -08003997int
3998lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3999{
4000 LIST_HEAD(blist);
4001 struct lpfc_sli4_hdw_queue *qp;
James Smartc4908502019-01-28 11:14:28 -08004002 struct lpfc_io_buf *lpfc_cmd;
4003 struct lpfc_io_buf *iobufp, *prev_iobufp;
James Smart5e5b5112019-01-28 11:14:22 -08004004 int idx, cnt, xri, inserted;
4005
4006 cnt = 0;
4007 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4008 qp = &phba->sli4_hba.hdwq[idx];
4009 spin_lock_irq(&qp->io_buf_list_get_lock);
4010 spin_lock(&qp->io_buf_list_put_lock);
4011
4012 /* Take everything off the get and put lists */
4013 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4014 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4015 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4016 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4017 cnt += qp->get_io_bufs + qp->put_io_bufs;
4018 qp->get_io_bufs = 0;
4019 qp->put_io_bufs = 0;
4020 qp->total_io_bufs = 0;
4021 spin_unlock(&qp->io_buf_list_put_lock);
4022 spin_unlock_irq(&qp->io_buf_list_get_lock);
4023 }
4024
4025 /*
4026 * Take IO buffers off blist and put on cbuf sorted by XRI.
4027 * This is because POST_SGL takes a sequential range of XRIs
4028 * to post to the firmware.
4029 */
4030 for (idx = 0; idx < cnt; idx++) {
James Smartc4908502019-01-28 11:14:28 -08004031 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
James Smart5e5b5112019-01-28 11:14:22 -08004032 if (!lpfc_cmd)
4033 return cnt;
4034 if (idx == 0) {
4035 list_add_tail(&lpfc_cmd->list, cbuf);
4036 continue;
4037 }
4038 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4039 inserted = 0;
4040 prev_iobufp = NULL;
4041 list_for_each_entry(iobufp, cbuf, list) {
4042 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4043 if (prev_iobufp)
4044 list_add(&lpfc_cmd->list,
4045 &prev_iobufp->list);
4046 else
4047 list_add(&lpfc_cmd->list, cbuf);
4048 inserted = 1;
4049 break;
4050 }
4051 prev_iobufp = iobufp;
4052 }
4053 if (!inserted)
4054 list_add_tail(&lpfc_cmd->list, cbuf);
4055 }
4056 return cnt;
4057}
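
/*
 * The insert loop above keeps @cbuf sorted in ascending sli4_xritag order,
 * because SGL block post works on sequential XRI ranges. A minimal sketch of
 * the same idea on a plain array (illustration only, not driver code):
 *
 *	static void sorted_insert(u16 *xris, int *n, u16 xri)
 *	{
 *		int i = *n;
 *
 *		while (i > 0 && xris[i - 1] > xri) {
 *			xris[i] = xris[i - 1];
 *			i--;
 *		}
 *		xris[i] = xri;
 *		(*n)++;
 *	}
 */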
4058
4059int
4060lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4061{
4062 struct lpfc_sli4_hdw_queue *qp;
James Smartc4908502019-01-28 11:14:28 -08004063 struct lpfc_io_buf *lpfc_cmd;
James Smart5e5b5112019-01-28 11:14:22 -08004064 int idx, cnt;
4065
4066 qp = phba->sli4_hba.hdwq;
4067 cnt = 0;
4068 while (!list_empty(cbuf)) {
4069 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4070 list_remove_head(cbuf, lpfc_cmd,
James Smartc4908502019-01-28 11:14:28 -08004071 struct lpfc_io_buf, list);
James Smart5e5b5112019-01-28 11:14:22 -08004072 if (!lpfc_cmd)
4073 return cnt;
4074 cnt++;
4075 qp = &phba->sli4_hba.hdwq[idx];
James Smart1fbf9742019-01-28 11:14:26 -08004076 lpfc_cmd->hdwq_no = idx;
4077 lpfc_cmd->hdwq = qp;
James Smart5e5b5112019-01-28 11:14:22 -08004078 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4079 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4080 spin_lock(&qp->io_buf_list_put_lock);
4081 list_add_tail(&lpfc_cmd->list,
4082 &qp->lpfc_io_buf_list_put);
4083 qp->put_io_bufs++;
4084 qp->total_io_bufs++;
4085 spin_unlock(&qp->io_buf_list_put_lock);
4086 }
4087 }
4088 return cnt;
4089}
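
/*
 * Note: the nested loops above hand the XRI-sorted buffers back out
 * round-robin, one buffer per hardware queue per pass, so consecutive XRIs
 * end up spread across the per-hdwq put lists rather than clustered on a
 * single queue.
 */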
4090
James Smartf358dd02017-02-12 13:52:34 -08004091/**
James Smart5e5b5112019-01-28 11:14:22 -08004092 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
James Smart895427b2017-02-12 13:52:30 -08004093 * @phba: pointer to lpfc hba data structure.
4094 *
 4095 * This routine first calculates the sizes of the current els and allocated
 4096 * io xri-sgl lists, and then goes through all sgls to update the physical
 4097 * XRIs assigned due to port function reset. During port initialization, the
 4098 * current els and allocated io sgl lists are empty.
4099 *
4100 * Return codes
4101 * 0 - successful (for now, it always returns 0)
4102 **/
4103int
James Smart5e5b5112019-01-28 11:14:22 -08004104lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
James Smart895427b2017-02-12 13:52:30 -08004105{
James Smartc4908502019-01-28 11:14:28 -08004106 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
James Smart0794d602019-01-28 11:14:19 -08004107 uint16_t i, lxri, els_xri_cnt;
James Smart5e5b5112019-01-28 11:14:22 -08004108 uint16_t io_xri_cnt, io_xri_max;
4109 LIST_HEAD(io_sgl_list);
James Smart0794d602019-01-28 11:14:19 -08004110 int rc, cnt;
James Smart895427b2017-02-12 13:52:30 -08004111
4112 /*
James Smart0794d602019-01-28 11:14:19 -08004113 * update on pci function's allocated nvme xri-sgl list
James Smart895427b2017-02-12 13:52:30 -08004114 */
James Smart0794d602019-01-28 11:14:19 -08004115
4116 /* maximum number of xris available for nvme buffers */
James Smart895427b2017-02-12 13:52:30 -08004117 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
James Smart5e5b5112019-01-28 11:14:22 -08004118 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4119 phba->sli4_hba.io_xri_max = io_xri_max;
James Smart8a9d2e82012-05-09 21:16:12 -04004120
James Smarte8c0a772017-04-21 16:04:49 -07004121 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart0794d602019-01-28 11:14:19 -08004122 "6074 Current allocated XRI sgl count:%d, "
4123 "maximum XRI count:%d\n",
James Smart5e5b5112019-01-28 11:14:22 -08004124 phba->sli4_hba.io_xri_cnt,
4125 phba->sli4_hba.io_xri_max);
James Smarte8c0a772017-04-21 16:04:49 -07004126
James Smart5e5b5112019-01-28 11:14:22 -08004127 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
James Smart0794d602019-01-28 11:14:19 -08004128
James Smart5e5b5112019-01-28 11:14:22 -08004129 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
James Smart0794d602019-01-28 11:14:19 -08004130 /* max nvme xri shrunk below the allocated nvme buffers */
James Smart5e5b5112019-01-28 11:14:22 -08004131 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4132 phba->sli4_hba.io_xri_max;
James Smart0794d602019-01-28 11:14:19 -08004133 /* release the extra allocated nvme buffers */
James Smart5e5b5112019-01-28 11:14:22 -08004134 for (i = 0; i < io_xri_cnt; i++) {
4135 list_remove_head(&io_sgl_list, lpfc_ncmd,
James Smartc4908502019-01-28 11:14:28 -08004136 struct lpfc_io_buf, list);
James Smart0794d602019-01-28 11:14:19 -08004137 if (lpfc_ncmd) {
Romain Perier771db5c2017-07-06 10:13:05 +02004138 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
James Smart0794d602019-01-28 11:14:19 -08004139 lpfc_ncmd->data,
4140 lpfc_ncmd->dma_handle);
4141 kfree(lpfc_ncmd);
James Smarta2fc4aef2014-09-03 12:57:55 -04004142 }
James Smart8a9d2e82012-05-09 21:16:12 -04004143 }
James Smart5e5b5112019-01-28 11:14:22 -08004144 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04004145 }
4146
James Smart0794d602019-01-28 11:14:19 -08004147 /* update xris associated to remaining allocated nvme buffers */
4148 lpfc_ncmd = NULL;
4149 lpfc_ncmd_next = NULL;
James Smart5e5b5112019-01-28 11:14:22 -08004150 phba->sli4_hba.io_xri_cnt = cnt;
James Smart0794d602019-01-28 11:14:19 -08004151 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
James Smart5e5b5112019-01-28 11:14:22 -08004152 &io_sgl_list, list) {
James Smart8a9d2e82012-05-09 21:16:12 -04004153 lxri = lpfc_sli4_next_xritag(phba);
4154 if (lxri == NO_XRI) {
Dick Kennedy372c1872020-06-30 14:50:00 -07004155 lpfc_printf_log(phba, KERN_ERR,
4156 LOG_TRACE_EVENT,
James Smart0794d602019-01-28 11:14:19 -08004157 "6075 Failed to allocate xri for "
4158 "nvme buffer\n");
James Smart8a9d2e82012-05-09 21:16:12 -04004159 rc = -ENOMEM;
4160 goto out_free_mem;
4161 }
James Smart0794d602019-01-28 11:14:19 -08004162 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4163 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
James Smart8a9d2e82012-05-09 21:16:12 -04004164 }
James Smart5e5b5112019-01-28 11:14:22 -08004165 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
dea31012005-04-17 16:05:31 -05004166 return 0;
James Smart8a9d2e82012-05-09 21:16:12 -04004167
4168out_free_mem:
James Smart5e5b5112019-01-28 11:14:22 -08004169 lpfc_io_free(phba);
James Smart8a9d2e82012-05-09 21:16:12 -04004170 return rc;
dea31012005-04-17 16:05:31 -05004171}
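
/*
 * Summary of the update flow implemented above: drain every hardware queue
 * into one XRI-sorted list (lpfc_io_buf_flush), trim any buffers that no
 * longer fit under the new io_xri_max, reassign the logical/physical XRIs,
 * then redistribute the survivors across the hardware queues
 * (lpfc_io_buf_replenish).
 */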
4172
James Smart0794d602019-01-28 11:14:19 -08004173/**
James Smart5e5b5112019-01-28 11:14:22 -08004174 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
Lee Jonesfe614ac2020-07-23 13:24:22 +01004175 * @phba: Pointer to lpfc hba data structure.
4176 * @num_to_alloc: The requested number of buffers to allocate.
James Smart0794d602019-01-28 11:14:19 -08004177 *
4178 * This routine allocates nvme buffers for device with SLI-4 interface spec,
4179 * the nvme buffer contains all the necessary information needed to initiate
4180 * an I/O. After allocating up to @num_to_allocate IO buffers and put
4181 * them on a list, it post them to the port by using SGL block post.
4182 *
4183 * Return codes:
James Smart5e5b5112019-01-28 11:14:22 -08004184 * int - number of IO buffers that were allocated and posted.
James Smart0794d602019-01-28 11:14:19 -08004185 * 0 = failure, less than num_to_alloc is a partial failure.
4186 **/
4187int
James Smart5e5b5112019-01-28 11:14:22 -08004188lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
James Smart0794d602019-01-28 11:14:19 -08004189{
James Smartc4908502019-01-28 11:14:28 -08004190 struct lpfc_io_buf *lpfc_ncmd;
James Smart0794d602019-01-28 11:14:19 -08004191 struct lpfc_iocbq *pwqeq;
4192 uint16_t iotag, lxri = 0;
4193 int bcnt, num_posted;
4194 LIST_HEAD(prep_nblist);
4195 LIST_HEAD(post_nblist);
4196 LIST_HEAD(nvme_nblist);
4197
James Smart5e5b5112019-01-28 11:14:22 -08004198 phba->sli4_hba.io_xri_cnt = 0;
James Smart0794d602019-01-28 11:14:19 -08004199 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
James Smart7f9989b2019-08-27 14:27:46 -07004200 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
James Smart0794d602019-01-28 11:14:19 -08004201 if (!lpfc_ncmd)
4202 break;
4203 /*
4204 * Get memory from the pci pool to map the virt space to
4205 * pci bus space for an I/O. The DMA buffer includes the
4206 * number of SGE's necessary to support the sg_tablesize.
4207 */
Thomas Meyera5c990e2019-05-29 22:21:36 +02004208 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4209 GFP_KERNEL,
4210 &lpfc_ncmd->dma_handle);
James Smart0794d602019-01-28 11:14:19 -08004211 if (!lpfc_ncmd->data) {
4212 kfree(lpfc_ncmd);
4213 break;
4214 }
James Smart0794d602019-01-28 11:14:19 -08004215
James Smartd79c9e92019-08-14 16:57:09 -07004216 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4217 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4218 } else {
4219 /*
4220 * 4K Page alignment is CRITICAL to BlockGuard, double
4221 * check to be sure.
4222 */
4223 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4224 (((unsigned long)(lpfc_ncmd->data) &
4225 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07004226 lpfc_printf_log(phba, KERN_ERR,
4227 LOG_TRACE_EVENT,
James Smartd79c9e92019-08-14 16:57:09 -07004228 "3369 Memory alignment err: "
4229 "addr=%lx\n",
4230 (unsigned long)lpfc_ncmd->data);
4231 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4232 lpfc_ncmd->data,
4233 lpfc_ncmd->dma_handle);
4234 kfree(lpfc_ncmd);
4235 break;
4236 }
James Smart0794d602019-01-28 11:14:19 -08004237 }
4238
James Smartd79c9e92019-08-14 16:57:09 -07004239 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4240
James Smart0794d602019-01-28 11:14:19 -08004241 lxri = lpfc_sli4_next_xritag(phba);
4242 if (lxri == NO_XRI) {
4243 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4244 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4245 kfree(lpfc_ncmd);
4246 break;
4247 }
4248 pwqeq = &lpfc_ncmd->cur_iocbq;
4249
4250 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4251 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4252 if (iotag == 0) {
4253 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4254 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4255 kfree(lpfc_ncmd);
Dick Kennedy372c1872020-06-30 14:50:00 -07004256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0794d602019-01-28 11:14:19 -08004257 "6121 Failed to allocate IOTAG for"
4258 " XRI:0x%x\n", lxri);
4259 lpfc_sli4_free_xri(phba, lxri);
4260 break;
4261 }
4262 pwqeq->sli4_lxritag = lxri;
4263 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4264 pwqeq->context1 = lpfc_ncmd;
4265
4266 /* Initialize local short-hand pointers. */
4267 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4268 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4269 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
James Smartc2017262019-01-28 11:14:37 -08004270 spin_lock_init(&lpfc_ncmd->buf_lock);
James Smart0794d602019-01-28 11:14:19 -08004271
4272 /* add the nvme buffer to a post list */
4273 list_add_tail(&lpfc_ncmd->list, &post_nblist);
James Smart5e5b5112019-01-28 11:14:22 -08004274 phba->sli4_hba.io_xri_cnt++;
James Smart0794d602019-01-28 11:14:19 -08004275 }
4276 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4277 "6114 Allocate %d out of %d requested new NVME "
4278 "buffers\n", bcnt, num_to_alloc);
4279
4280 /* post the list of nvme buffer sgls to port if available */
4281 if (!list_empty(&post_nblist))
James Smart5e5b5112019-01-28 11:14:22 -08004282 num_posted = lpfc_sli4_post_io_sgl_list(
James Smart0794d602019-01-28 11:14:19 -08004283 phba, &post_nblist, bcnt);
4284 else
4285 num_posted = 0;
4286
4287 return num_posted;
4288}
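
/*
 * Hedged usage sketch (the real sizing logic lives in the SLI-4 setup path
 * and may differ in detail): a caller typically asks for the full IO XRI
 * budget and treats a smaller return value as a partial allocation, e.g.
 *
 *	cnt = lpfc_new_io_buf(phba, phba->sli4_hba.io_xri_max);
 *	if (!cnt)
 *		return -ENOMEM;
 */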
4289
James Smart96418b52017-03-04 09:30:31 -08004290static uint64_t
4291lpfc_get_wwpn(struct lpfc_hba *phba)
4292{
4293 uint64_t wwn;
4294 int rc;
4295 LPFC_MBOXQ_t *mboxq;
4296 MAILBOX_t *mb;
4297
James Smart96418b52017-03-04 09:30:31 -08004298 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4299 GFP_KERNEL);
4300 if (!mboxq)
4301 return (uint64_t)-1;
4302
4303 /* First get WWN of HBA instance */
4304 lpfc_read_nv(phba, mboxq);
4305 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4306 if (rc != MBX_SUCCESS) {
Dick Kennedy372c1872020-06-30 14:50:00 -07004307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart96418b52017-03-04 09:30:31 -08004308 "6019 Mailbox failed , mbxCmd x%x "
4309 "READ_NV, mbxStatus x%x\n",
4310 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4311 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4312 mempool_free(mboxq, phba->mbox_mem_pool);
4313 return (uint64_t) -1;
4314 }
4315 mb = &mboxq->u.mb;
4316 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4317 /* wwn is WWPN of HBA instance */
4318 mempool_free(mboxq, phba->mbox_mem_pool);
4319 if (phba->sli_rev == LPFC_SLI_REV4)
4320 return be64_to_cpu(wwn);
4321 else
Maurizio Lombardi286871a2017-08-23 16:55:48 -07004322 return rol64(wwn, 32);
James Smart96418b52017-03-04 09:30:31 -08004323}
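
/*
 * Note: the WWPN is returned in CPU byte order (be64_to_cpu for SLI-4, a
 * 32-bit rotate for SLI-3) so that lpfc_create_port() below can compare it
 * directly against the 64-bit values supplied through the lpfc_no_hba_reset
 * module parameter.
 */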
4324
James Smarte59058c2008-08-24 21:49:00 -04004325/**
Gaurav Srivastava5e633302021-06-08 10:05:49 +05304326 * lpfc_vmid_res_alloc - Allocates resources for VMID
4327 * @phba: pointer to lpfc hba data structure.
4328 * @vport: pointer to vport data structure
4329 *
 4330 * This routine allocates the resources needed for the VMID.
4331 *
4332 * Return codes
4333 * 0 on Success
4334 * Non-0 on Failure
4335 */
4336static int
4337lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4338{
4339 /* VMID feature is supported only on SLI4 */
4340 if (phba->sli_rev == LPFC_SLI_REV3) {
4341 phba->cfg_vmid_app_header = 0;
4342 phba->cfg_vmid_priority_tagging = 0;
4343 }
4344
4345 if (lpfc_is_vmid_enabled(phba)) {
4346 vport->vmid =
4347 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4348 GFP_KERNEL);
4349 if (!vport->vmid)
4350 return -ENOMEM;
4351
4352 rwlock_init(&vport->vmid_lock);
4353
4354 /* Set the VMID parameters for the vport */
4355 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4356 vport->vmid_inactivity_timeout =
4357 phba->cfg_vmid_inactivity_timeout;
4358 vport->max_vmid = phba->cfg_max_vmid;
4359 vport->cur_vmid_cnt = 0;
4360
4361 vport->vmid_priority_range = bitmap_zalloc
4362 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4363
4364 if (!vport->vmid_priority_range) {
4365 kfree(vport->vmid);
4366 return -ENOMEM;
4367 }
4368
4369 hash_init(vport->hash_table);
4370 }
4371 return 0;
4372}
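
/*
 * Note on ownership: on success, vport->vmid and vport->vmid_priority_range
 * belong to the vport; if a later step of port creation fails, the caller
 * frees them (see the out_put_shost path in lpfc_create_port() below).
 */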
4373
4374/**
James Smart3621a712009-04-06 18:47:14 -04004375 * lpfc_create_port - Create an FC port
James Smarte59058c2008-08-24 21:49:00 -04004376 * @phba: pointer to lpfc hba data structure.
4377 * @instance: a unique integer ID to this FC port.
4378 * @dev: pointer to the device data structure.
4379 *
 4380 * This routine creates an FC port for the upper layer protocol. The FC port
 4381 * can be created on top of either a physical port or a virtual port provided
 4382 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 4383 * and associates it with the FC port created before adding the shost to the
 4384 * SCSI layer.
4385 *
4386 * Return codes
4387 * @vport - pointer to the virtual N_Port data structure.
4388 * NULL - port create failed.
4389 **/
James Smart2e0fef82007-06-17 19:56:36 -05004390struct lpfc_vport *
James Smart3de2a652007-08-02 11:09:59 -04004391lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
James Smart47a86172007-04-25 09:53:22 -04004392{
James Smart2e0fef82007-06-17 19:56:36 -05004393 struct lpfc_vport *vport;
James Smart895427b2017-02-12 13:52:30 -08004394 struct Scsi_Host *shost = NULL;
James Smartc90b4482020-03-22 11:12:56 -07004395 struct scsi_host_template *template;
James Smart2e0fef82007-06-17 19:56:36 -05004396 int error = 0;
James Smart96418b52017-03-04 09:30:31 -08004397 int i;
4398 uint64_t wwn;
4399 bool use_no_reset_hba = false;
James Smart56bc8022017-06-15 22:56:43 -07004400 int rc;
James Smart96418b52017-03-04 09:30:31 -08004401
James Smart56bc8022017-06-15 22:56:43 -07004402 if (lpfc_no_hba_reset_cnt) {
4403 if (phba->sli_rev < LPFC_SLI_REV4 &&
4404 dev == &phba->pcidev->dev) {
4405 /* Reset the port first */
4406 lpfc_sli_brdrestart(phba);
4407 rc = lpfc_sli_chipset_init(phba);
4408 if (rc)
4409 return NULL;
4410 }
4411 wwn = lpfc_get_wwpn(phba);
4412 }
James Smart96418b52017-03-04 09:30:31 -08004413
4414 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4415 if (wwn == lpfc_no_hba_reset[i]) {
Dick Kennedy372c1872020-06-30 14:50:00 -07004416 lpfc_printf_log(phba, KERN_ERR,
4417 LOG_TRACE_EVENT,
James Smart96418b52017-03-04 09:30:31 -08004418 "6020 Setting use_no_reset port=%llx\n",
4419 wwn);
4420 use_no_reset_hba = true;
4421 break;
4422 }
4423 }
James Smart47a86172007-04-25 09:53:22 -04004424
James Smartc90b4482020-03-22 11:12:56 -07004425 /* Seed template for SCSI host registration */
4426 if (dev == &phba->pcidev->dev) {
4427 template = &phba->port_template;
4428
4429 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4430 /* Seed physical port template */
4431 memcpy(template, &lpfc_template, sizeof(*template));
4432
James Smart7c30bb62020-10-20 13:27:16 -07004433 if (use_no_reset_hba)
James Smartc90b4482020-03-22 11:12:56 -07004434 /* template is for a no reset SCSI Host */
James Smartc90b4482020-03-22 11:12:56 -07004435 template->eh_host_reset_handler = NULL;
James Smartc90b4482020-03-22 11:12:56 -07004436
4437 /* Template for all vports this physical port creates */
4438 memcpy(&phba->vport_template, &lpfc_template,
4439 sizeof(*template));
James Smartc90b4482020-03-22 11:12:56 -07004440 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4441 phba->vport_template.eh_bus_reset_handler = NULL;
4442 phba->vport_template.eh_host_reset_handler = NULL;
4443 phba->vport_template.vendor_id = 0;
4444
4445 /* Initialize the host templates with updated value */
4446 if (phba->sli_rev == LPFC_SLI_REV4) {
4447 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4448 phba->vport_template.sg_tablesize =
4449 phba->cfg_scsi_seg_cnt;
4450 } else {
4451 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4452 phba->vport_template.sg_tablesize =
4453 phba->cfg_sg_seg_cnt;
4454 }
4455
James Smart895427b2017-02-12 13:52:30 -08004456 } else {
James Smartc90b4482020-03-22 11:12:56 -07004457 /* NVMET is for physical port only */
4458 memcpy(template, &lpfc_template_nvme,
4459 sizeof(*template));
James Smart895427b2017-02-12 13:52:30 -08004460 }
James Smartc90b4482020-03-22 11:12:56 -07004461 } else {
4462 template = &phba->vport_template;
James Smartea4142f2015-04-07 15:07:13 -04004463 }
James Smartc90b4482020-03-22 11:12:56 -07004464
4465 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
James Smart2e0fef82007-06-17 19:56:36 -05004466 if (!shost)
4467 goto out;
James Smart47a86172007-04-25 09:53:22 -04004468
James Smart2e0fef82007-06-17 19:56:36 -05004469 vport = (struct lpfc_vport *) shost->hostdata;
4470 vport->phba = phba;
James Smart2e0fef82007-06-17 19:56:36 -05004471 vport->load_flag |= FC_LOADING;
James Smart92d7f7b2007-06-17 19:56:38 -05004472 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
James Smart7f5f3d02008-02-08 18:50:14 -05004473 vport->fc_rscn_flush = 0;
James Smart3de2a652007-08-02 11:09:59 -04004474 lpfc_get_vport_cfgparam(vport);
James Smart895427b2017-02-12 13:52:30 -08004475
James Smartf6e84792019-01-28 11:14:38 -08004476 /* Adjust value in vport */
4477 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4478
James Smart2e0fef82007-06-17 19:56:36 -05004479 shost->unique_id = instance;
4480 shost->max_id = LPFC_MAX_TARGET;
James Smart3de2a652007-08-02 11:09:59 -04004481 shost->max_lun = vport->cfg_max_luns;
James Smart2e0fef82007-06-17 19:56:36 -05004482 shost->this_id = -1;
4483 shost->max_cmd_len = 16;
James Smart6a828b02019-01-28 11:14:31 -08004484
James Smartda0436e2009-05-22 14:51:39 -04004485 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart77ffd342019-08-15 19:36:49 -07004486 if (!phba->cfg_fcp_mq_threshold ||
4487 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4488 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4489
4490 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4491 phba->cfg_fcp_mq_threshold);
James Smart6a828b02019-01-28 11:14:31 -08004492
James Smart28baac72010-02-12 14:42:03 -05004493 shost->dma_boundary =
James Smartcb5172e2010-03-15 11:25:07 -04004494 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
James Smartd79c9e92019-08-14 16:57:09 -07004495
4496 if (phba->cfg_xpsgl && !phba->nvmet_support)
4497 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4498 else
4499 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
James Smartace44e42019-01-28 11:14:27 -08004500 } else
4501 /* SLI-3 has a limited number of hardware queues (3),
4502 * thus there is only one for FCP processing.
4503 */
4504 shost->nr_hw_queues = 1;
James Smart81301a92008-12-04 22:39:46 -05004505
James Smart47a86172007-04-25 09:53:22 -04004506 /*
James Smart2e0fef82007-06-17 19:56:36 -05004507 * Set initial can_queue value since 0 is no longer supported and
4508 * scsi_add_host will fail. This will be adjusted later based on the
4509 * max xri value determined in hba setup.
James Smart47a86172007-04-25 09:53:22 -04004510 */
James Smart2e0fef82007-06-17 19:56:36 -05004511 shost->can_queue = phba->cfg_hba_queue_depth - 10;
James Smart3de2a652007-08-02 11:09:59 -04004512 if (dev != &phba->pcidev->dev) {
James Smart92d7f7b2007-06-17 19:56:38 -05004513 shost->transportt = lpfc_vport_transport_template;
4514 vport->port_type = LPFC_NPIV_PORT;
4515 } else {
4516 shost->transportt = lpfc_transport_template;
4517 vport->port_type = LPFC_PHYSICAL_PORT;
4518 }
James Smart47a86172007-04-25 09:53:22 -04004519
James Smartc90b4482020-03-22 11:12:56 -07004520 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4521 "9081 CreatePort TMPLATE type %x TBLsize %d "
4522 "SEGcnt %d/%d\n",
4523 vport->port_type, shost->sg_tablesize,
4524 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4525
Gaurav Srivastava5e633302021-06-08 10:05:49 +05304526 /* Allocate the resources for VMID */
4527 rc = lpfc_vmid_res_alloc(phba, vport);
4528
4529 if (rc)
4530 goto out;
4531
James Smart2e0fef82007-06-17 19:56:36 -05004532 /* Initialize all internally managed lists. */
4533 INIT_LIST_HEAD(&vport->fc_nodes);
James Smartda0436e2009-05-22 14:51:39 -04004534 INIT_LIST_HEAD(&vport->rcv_buffer_list);
James Smart2e0fef82007-06-17 19:56:36 -05004535 spin_lock_init(&vport->work_port_lock);
James Smart47a86172007-04-25 09:53:22 -04004536
Kees Cookf22eb4d2017-09-06 20:24:26 -07004537 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
James Smart47a86172007-04-25 09:53:22 -04004538
Kees Cookf22eb4d2017-09-06 20:24:26 -07004539 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
James Smart92494142011-02-16 12:39:44 -05004540
Kees Cookf22eb4d2017-09-06 20:24:26 -07004541 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
James Smart92494142011-02-16 12:39:44 -05004542
James Smartaa6ff302019-05-21 17:49:09 -07004543 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4544 lpfc_setup_bg(phba, shost);
4545
James Bottomleyd139b9b2009-11-05 13:33:12 -06004546 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
James Smart2e0fef82007-06-17 19:56:36 -05004547 if (error)
4548 goto out_put_shost;
James Smart47a86172007-04-25 09:53:22 -04004549
James Smart523128e2018-09-10 10:30:46 -07004550 spin_lock_irq(&phba->port_list_lock);
James Smart2e0fef82007-06-17 19:56:36 -05004551 list_add_tail(&vport->listentry, &phba->port_list);
James Smart523128e2018-09-10 10:30:46 -07004552 spin_unlock_irq(&phba->port_list_lock);
James Smart2e0fef82007-06-17 19:56:36 -05004553 return vport;
James Smart47a86172007-04-25 09:53:22 -04004554
James Smart2e0fef82007-06-17 19:56:36 -05004555out_put_shost:
Gaurav Srivastava5e633302021-06-08 10:05:49 +05304556 kfree(vport->vmid);
4557 bitmap_free(vport->vmid_priority_range);
James Smart2e0fef82007-06-17 19:56:36 -05004558 scsi_host_put(shost);
4559out:
4560 return NULL;
James Smart47a86172007-04-25 09:53:22 -04004561}
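
/*
 * Call-site sketch (illustration only; the real call sites live elsewhere in
 * the driver): the physical port passes the PCI device, which enables the
 * template and no-reset handling above, while an NPIV vport passes the
 * fc_vport's device, roughly:
 *
 *	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 *	...
 *	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
 */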
4562
James Smarte59058c2008-08-24 21:49:00 -04004563/**
James Smart3621a712009-04-06 18:47:14 -04004564 * destroy_port - destroy an FC port
James Smarte59058c2008-08-24 21:49:00 -04004565 * @vport: pointer to an lpfc virtual N_Port data structure.
4566 *
 4567 * This routine destroys an FC port from the upper layer protocol. All the
4568 * resources associated with the port are released.
4569 **/
James Smart2e0fef82007-06-17 19:56:36 -05004570void
4571destroy_port(struct lpfc_vport *vport)
James Smart47a86172007-04-25 09:53:22 -04004572{
James Smart92d7f7b2007-06-17 19:56:38 -05004573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4574 struct lpfc_hba *phba = vport->phba;
James Smart47a86172007-04-25 09:53:22 -04004575
James Smart858c9f62007-06-17 19:56:39 -05004576 lpfc_debugfs_terminate(vport);
James Smart92d7f7b2007-06-17 19:56:38 -05004577 fc_remove_host(shost);
4578 scsi_remove_host(shost);
James Smart47a86172007-04-25 09:53:22 -04004579
James Smart523128e2018-09-10 10:30:46 -07004580 spin_lock_irq(&phba->port_list_lock);
James Smart92d7f7b2007-06-17 19:56:38 -05004581 list_del_init(&vport->listentry);
James Smart523128e2018-09-10 10:30:46 -07004582 spin_unlock_irq(&phba->port_list_lock);
James Smart47a86172007-04-25 09:53:22 -04004583
James Smart92d7f7b2007-06-17 19:56:38 -05004584 lpfc_cleanup(vport);
James Smart47a86172007-04-25 09:53:22 -04004585 return;
James Smart47a86172007-04-25 09:53:22 -04004586}
4587
James Smarte59058c2008-08-24 21:49:00 -04004588/**
James Smart3621a712009-04-06 18:47:14 -04004589 * lpfc_get_instance - Get a unique integer ID
James Smarte59058c2008-08-24 21:49:00 -04004590 *
4591 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4592 * uses the kernel idr facility to perform the task.
4593 *
4594 * Return codes:
4595 * instance - a unique integer ID allocated as the new instance.
4596 * -1 - lpfc get instance failed.
4597 **/
James Smart92d7f7b2007-06-17 19:56:38 -05004598int
4599lpfc_get_instance(void)
4600{
Tejun Heoab516032013-02-27 17:04:44 -08004601 int ret;
James Smart92d7f7b2007-06-17 19:56:38 -05004602
Tejun Heoab516032013-02-27 17:04:44 -08004603 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4604 return ret < 0 ? -1 : ret;
James Smart92d7f7b2007-06-17 19:56:38 -05004605}
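
/*
 * Usage sketch (illustration only): the allocated instance becomes the board
 * number and is eventually returned to the pool with idr_remove(), roughly:
 *
 *	phba->brd_no = lpfc_get_instance();
 *	if (phba->brd_no < 0)
 *		return NULL;
 *	...
 *	idr_remove(&lpfc_hba_index, phba->brd_no);
 */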
4606
James Smarte59058c2008-08-24 21:49:00 -04004607/**
James Smart3621a712009-04-06 18:47:14 -04004608 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
James Smarte59058c2008-08-24 21:49:00 -04004609 * @shost: pointer to SCSI host data structure.
4610 * @time: elapsed time of the scan in jiffies.
4611 *
4612 * This routine is called by the SCSI layer with a SCSI host to determine
 4613 * whether the host scan is finished.
4614 *
4615 * Note: there is no scan_start function as adapter initialization will have
4616 * asynchronously kicked off the link initialization.
4617 *
4618 * Return codes
4619 * 0 - SCSI host scan is not over yet.
4620 * 1 - SCSI host scan is over.
4621 **/
James Smart47a86172007-04-25 09:53:22 -04004622int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4623{
James Smart2e0fef82007-06-17 19:56:36 -05004624 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4625 struct lpfc_hba *phba = vport->phba;
James Smart858c9f62007-06-17 19:56:39 -05004626 int stat = 0;
James Smart47a86172007-04-25 09:53:22 -04004627
James Smart858c9f62007-06-17 19:56:39 -05004628 spin_lock_irq(shost->host_lock);
4629
James Smart51ef4c22007-08-02 11:10:31 -04004630 if (vport->load_flag & FC_UNLOADING) {
James Smart858c9f62007-06-17 19:56:39 -05004631 stat = 1;
James Smart47a86172007-04-25 09:53:22 -04004632 goto finished;
James Smart858c9f62007-06-17 19:56:39 -05004633 }
James Smart256ec0d2013-04-17 20:14:58 -04004634 if (time >= msecs_to_jiffies(30 * 1000)) {
James Smart2e0fef82007-06-17 19:56:36 -05004635 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004636 "0461 Scanning longer than 30 "
4637 "seconds. Continuing initialization\n");
James Smart858c9f62007-06-17 19:56:39 -05004638 stat = 1;
James Smart47a86172007-04-25 09:53:22 -04004639 goto finished;
James Smart2e0fef82007-06-17 19:56:36 -05004640 }
James Smart256ec0d2013-04-17 20:14:58 -04004641 if (time >= msecs_to_jiffies(15 * 1000) &&
4642 phba->link_state <= LPFC_LINK_DOWN) {
James Smart2e0fef82007-06-17 19:56:36 -05004643 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004644 "0465 Link down longer than 15 "
4645 "seconds. Continuing initialization\n");
James Smart858c9f62007-06-17 19:56:39 -05004646 stat = 1;
James Smart2e0fef82007-06-17 19:56:36 -05004647 goto finished;
James Smart47a86172007-04-25 09:53:22 -04004648 }
4649
James Smart2e0fef82007-06-17 19:56:36 -05004650 if (vport->port_state != LPFC_VPORT_READY)
James Smart858c9f62007-06-17 19:56:39 -05004651 goto finished;
James Smart2e0fef82007-06-17 19:56:36 -05004652 if (vport->num_disc_nodes || vport->fc_prli_sent)
James Smart858c9f62007-06-17 19:56:39 -05004653 goto finished;
James Smart256ec0d2013-04-17 20:14:58 -04004654 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
James Smart858c9f62007-06-17 19:56:39 -05004655 goto finished;
James Smart2e0fef82007-06-17 19:56:36 -05004656 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
James Smart858c9f62007-06-17 19:56:39 -05004657 goto finished;
4658
4659 stat = 1;
James Smart47a86172007-04-25 09:53:22 -04004660
4661finished:
James Smart858c9f62007-06-17 19:56:39 -05004662 spin_unlock_irq(shost->host_lock);
4663 return stat;
James Smart92d7f7b2007-06-17 19:56:38 -05004664}
4665
Bart Van Assche3999df72019-03-28 11:06:16 -07004666static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
James Smartcd713482018-10-23 13:41:01 -07004667{
4668 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4669 struct lpfc_hba *phba = vport->phba;
4670
4671 fc_host_supported_speeds(shost) = 0;
Dick Kennedya1e4d3d2020-08-03 14:02:22 -07004672 /*
4673 * Avoid reporting supported link speed for FCoE as it can't be
4674 * controlled via FCoE.
4675 */
4676 if (phba->hba_flag & HBA_FCOE_MODE)
4677 return;
4678
James Smart1dc5ec22018-10-23 13:41:11 -07004679 if (phba->lmt & LMT_128Gb)
4680 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
James Smartcd713482018-10-23 13:41:01 -07004681 if (phba->lmt & LMT_64Gb)
4682 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4683 if (phba->lmt & LMT_32Gb)
4684 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4685 if (phba->lmt & LMT_16Gb)
4686 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4687 if (phba->lmt & LMT_10Gb)
4688 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4689 if (phba->lmt & LMT_8Gb)
4690 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4691 if (phba->lmt & LMT_4Gb)
4692 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4693 if (phba->lmt & LMT_2Gb)
4694 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4695 if (phba->lmt & LMT_1Gb)
4696 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4697}
4698
James Smarte59058c2008-08-24 21:49:00 -04004699/**
James Smart3621a712009-04-06 18:47:14 -04004700 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
James Smarte59058c2008-08-24 21:49:00 -04004701 * @shost: pointer to SCSI host data structure.
4702 *
4703 * This routine initializes a given SCSI host attributes on a FC port. The
4704 * SCSI host can be either on top of a physical port or a virtual port.
4705 **/
James Smart92d7f7b2007-06-17 19:56:38 -05004706void lpfc_host_attrib_init(struct Scsi_Host *shost)
4707{
4708 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4709 struct lpfc_hba *phba = vport->phba;
James Smart47a86172007-04-25 09:53:22 -04004710 /*
James Smart2e0fef82007-06-17 19:56:36 -05004711 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
James Smart47a86172007-04-25 09:53:22 -04004712 */
4713
James Smart2e0fef82007-06-17 19:56:36 -05004714 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4715 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
James Smart47a86172007-04-25 09:53:22 -04004716 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4717
4718 memset(fc_host_supported_fc4s(shost), 0,
James Smart2e0fef82007-06-17 19:56:36 -05004719 sizeof(fc_host_supported_fc4s(shost)));
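	/* Interpretive note (hedged): supported_fc4s mirrors the FC-GS FC-4
	 * TYPEs bitmap; bytes 2 and 7 below appear to advertise SCSI-FCP
	 * (TYPE 0x08) and CT (TYPE 0x20) support.
	 */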
James Smart47a86172007-04-25 09:53:22 -04004720 fc_host_supported_fc4s(shost)[2] = 1;
4721 fc_host_supported_fc4s(shost)[7] = 1;
4722
James Smart92d7f7b2007-06-17 19:56:38 -05004723 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4724 sizeof fc_host_symbolic_name(shost));
James Smart47a86172007-04-25 09:53:22 -04004725
James Smartcd713482018-10-23 13:41:01 -07004726 lpfc_host_supported_speeds_set(shost);
James Smart47a86172007-04-25 09:53:22 -04004727
4728 fc_host_maxframe_size(shost) =
James Smart2e0fef82007-06-17 19:56:36 -05004729 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4730 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
James Smart47a86172007-04-25 09:53:22 -04004731
Mike Christie0af5d702010-09-15 16:52:31 -05004732 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4733
James Smart47a86172007-04-25 09:53:22 -04004734 /* This value is also unchanging */
4735 memset(fc_host_active_fc4s(shost), 0,
James Smart2e0fef82007-06-17 19:56:36 -05004736 sizeof(fc_host_active_fc4s(shost)));
James Smart47a86172007-04-25 09:53:22 -04004737 fc_host_active_fc4s(shost)[2] = 1;
4738 fc_host_active_fc4s(shost)[7] = 1;
4739
James Smart92d7f7b2007-06-17 19:56:38 -05004740 fc_host_max_npiv_vports(shost) = phba->max_vpi;
James Smart47a86172007-04-25 09:53:22 -04004741 spin_lock_irq(shost->host_lock);
James Smart51ef4c22007-08-02 11:10:31 -04004742 vport->load_flag &= ~FC_LOADING;
James Smart47a86172007-04-25 09:53:22 -04004743 spin_unlock_irq(shost->host_lock);
James Smart47a86172007-04-25 09:53:22 -04004744}
dea31012005-04-17 16:05:31 -05004745
James Smarte59058c2008-08-24 21:49:00 -04004746/**
James Smartda0436e2009-05-22 14:51:39 -04004747 * lpfc_stop_port_s3 - Stop SLI3 device port
James Smarte59058c2008-08-24 21:49:00 -04004748 * @phba: pointer to lpfc hba data structure.
4749 *
James Smartda0436e2009-05-22 14:51:39 -04004750 * This routine is invoked to stop an SLI3 device port. It stops the device
4751 * from generating interrupts and stops the device driver's timers for the
4752 * device.
James Smarte59058c2008-08-24 21:49:00 -04004753 **/
James Smartdb2378e2008-02-08 18:49:51 -05004754static void
James Smartda0436e2009-05-22 14:51:39 -04004755lpfc_stop_port_s3(struct lpfc_hba *phba)
James Smartdb2378e2008-02-08 18:49:51 -05004756{
James Smartda0436e2009-05-22 14:51:39 -04004757 /* Clear all interrupt enable conditions */
4758 writel(0, phba->HCregaddr);
4759 readl(phba->HCregaddr); /* flush */
4760 /* Clear all pending interrupts */
4761 writel(0xffffffff, phba->HAregaddr);
4762 readl(phba->HAregaddr); /* flush */
James Smart93996272008-08-24 21:50:30 -04004763
James Smartda0436e2009-05-22 14:51:39 -04004764 /* Reset some HBA SLI setup states */
4765 lpfc_stop_hba_timers(phba);
4766 phba->pport->work_port_events = 0;
James Smartdb2378e2008-02-08 18:49:51 -05004767}
4768
James Smarte59058c2008-08-24 21:49:00 -04004769/**
James Smartda0436e2009-05-22 14:51:39 -04004770 * lpfc_stop_port_s4 - Stop SLI4 device port
James Smart5b75da22008-12-04 22:39:35 -05004771 * @phba: pointer to lpfc hba data structure.
4772 *
James Smartda0436e2009-05-22 14:51:39 -04004773 * This routine is invoked to stop an SLI4 device port. It stops the device
4774 * from generating interrupts and stops the device driver's timers for the
4775 * device.
4776 **/
4777static void
4778lpfc_stop_port_s4(struct lpfc_hba *phba)
4779{
4780 /* Reset some HBA SLI4 setup states */
4781 lpfc_stop_hba_timers(phba);
James Smartcdb42be2019-01-28 11:14:21 -08004782 if (phba->pport)
4783 phba->pport->work_port_events = 0;
James Smartda0436e2009-05-22 14:51:39 -04004784 phba->sli4_hba.intr_enable = 0;
James Smartda0436e2009-05-22 14:51:39 -04004785}
4786
4787/**
4788 * lpfc_stop_port - Wrapper function for stopping hba port
4789 * @phba: Pointer to HBA context object.
James Smart5b75da22008-12-04 22:39:35 -05004790 *
James Smartda0436e2009-05-22 14:51:39 -04004791 * This routine wraps the actual SLI3 or SLI4 hba stop port routine invoked
 4792 * through the API jump table function pointer in the lpfc_hba struct.
4793 **/
4794void
4795lpfc_stop_port(struct lpfc_hba *phba)
4796{
4797 phba->lpfc_stop_port(phba);
Dick Kennedyf485c182017-09-29 17:34:34 -07004798
4799 if (phba->wq)
4800 flush_workqueue(phba->wq);
James Smartda0436e2009-05-22 14:51:39 -04004801}
4802
4803/**
James Smartecfd03c2010-02-12 14:41:27 -05004804 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4805 * @phba: Pointer to hba for which this call is being executed.
4806 *
4807 * This routine starts the timer waiting for the FCF rediscovery to complete.
4808 **/
4809void
4810lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4811{
4812 unsigned long fcf_redisc_wait_tmo =
4813 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4814 /* Start fcf rediscovery wait period timer */
4815 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4816 spin_lock_irq(&phba->hbalock);
4817 /* Allow action to new fcf asynchronous event */
4818 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4819 /* Mark the FCF rediscovery pending state */
4820 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4821 spin_unlock_irq(&phba->hbalock);
4822}
4823
4824/**
4825 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
Lee Jonesfe614ac2020-07-23 13:24:22 +01004826 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
James Smartecfd03c2010-02-12 14:41:27 -05004827 *
4828 * This routine is invoked when waiting for FCF table rediscover has been
4829 * timed out. If new FCF record(s) has (have) been discovered during the
4830 * wait period, a new FCF event shall be added to the FCOE async event
4831 * list, and then worker thread shall be waked up for processing from the
4832 * worker thread context.
4833 **/
Rashika Kheriae399b222014-09-03 12:55:28 -04004834static void
Kees Cookf22eb4d2017-09-06 20:24:26 -07004835lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
James Smartecfd03c2010-02-12 14:41:27 -05004836{
Kees Cookf22eb4d2017-09-06 20:24:26 -07004837 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
James Smartecfd03c2010-02-12 14:41:27 -05004838
4839 /* Don't send FCF rediscovery event if timer cancelled */
4840 spin_lock_irq(&phba->hbalock);
4841 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4842 spin_unlock_irq(&phba->hbalock);
4843 return;
4844 }
4845 /* Clear FCF rediscovery timer pending flag */
4846 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4847 /* FCF rediscovery event to worker thread */
4848 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4849 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -05004850 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -04004851 "2776 FCF rediscover quiescent timer expired\n");
James Smartecfd03c2010-02-12 14:41:27 -05004852 /* wake up worker thread */
4853 lpfc_worker_wake_up(phba);
4854}
4855
4856/**
Gaurav Srivastava20397172021-06-08 10:05:54 +05304857 * lpfc_vmid_poll - VMID timeout detection
James Smart50baa152021-07-07 11:43:35 -07004858 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
Gaurav Srivastava20397172021-06-08 10:05:54 +05304859 *
4860 * This routine is invoked when there is no I/O from a VM for the specified
4861 * amount of time. When this situation is detected, the VMID has to be
4862 * deregistered from the switch and all the local resources freed. The VMID
4863 * will be reassigned to the VM once the I/O begins.
4864 **/
4865static void
4866lpfc_vmid_poll(struct timer_list *t)
4867{
4868 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
4869 u32 wake_up = 0;
4870
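	/* The timer handler only flags the work to be done; the worker
	 * thread is expected to perform the actual QFPA issue and the
	 * inactive VMID cleanup.
	 */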
4871 /* check if there is a need to issue QFPA */
4872 if (phba->pport->vmid_priority_tagging) {
4873 wake_up = 1;
4874 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
4875 }
4876
4877 /* Is the vmid inactivity timer enabled */
4878 if (phba->pport->vmid_inactivity_timeout ||
4879 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
4880 wake_up = 1;
4881 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
4882 }
4883
4884 if (wake_up)
4885 lpfc_worker_wake_up(phba);
4886
4887 /* restart the timer for the next iteration */
4888 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
4889 LPFC_VMID_TIMER));
4890}
4891
4892/**
James Smartda0436e2009-05-22 14:51:39 -04004893 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4894 * @phba: pointer to lpfc hba data structure.
4895 * @acqe_link: pointer to the async link completion queue entry.
4896 *
James Smart23288b72018-05-04 20:37:53 -07004897 * This routine is to parse the SLI4 link-attention link fault code.
James Smartda0436e2009-05-22 14:51:39 -04004898 **/
James Smart23288b72018-05-04 20:37:53 -07004899static void
James Smartda0436e2009-05-22 14:51:39 -04004900lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4901 struct lpfc_acqe_link *acqe_link)
4902{
James Smartda0436e2009-05-22 14:51:39 -04004903 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4904 case LPFC_ASYNC_LINK_FAULT_NONE:
4905 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4906 case LPFC_ASYNC_LINK_FAULT_REMOTE:
James Smart23288b72018-05-04 20:37:53 -07004907 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
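		/* Known fault codes require no further action here */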
James Smartda0436e2009-05-22 14:51:39 -04004908 break;
4909 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07004910 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart23288b72018-05-04 20:37:53 -07004911 "0398 Unknown link fault code: x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04004912 bf_get(lpfc_acqe_link_fault, acqe_link));
James Smartda0436e2009-05-22 14:51:39 -04004913 break;
4914 }
James Smartda0436e2009-05-22 14:51:39 -04004915}
4916
4917/**
4918 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4919 * @phba: pointer to lpfc hba data structure.
4920 * @acqe_link: pointer to the async link completion queue entry.
4921 *
4922 * This routine is to parse the SLI4 link attention type and translate it
4923 * into the base driver's link attention type coding.
4924 *
4925 * Return: Link attention type in terms of base driver's coding.
4926 **/
4927static uint8_t
4928lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4929 struct lpfc_acqe_link *acqe_link)
4930{
4931 uint8_t att_type;
4932
4933 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4934 case LPFC_ASYNC_LINK_STATUS_DOWN:
4935 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
James Smart76a95d72010-11-20 23:11:48 -05004936 att_type = LPFC_ATT_LINK_DOWN;
James Smartda0436e2009-05-22 14:51:39 -04004937 break;
4938 case LPFC_ASYNC_LINK_STATUS_UP:
4939 /* Ignore physical link up events - wait for logical link up */
James Smart76a95d72010-11-20 23:11:48 -05004940 att_type = LPFC_ATT_RESERVED;
James Smartda0436e2009-05-22 14:51:39 -04004941 break;
4942 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
James Smart76a95d72010-11-20 23:11:48 -05004943 att_type = LPFC_ATT_LINK_UP;
James Smartda0436e2009-05-22 14:51:39 -04004944 break;
4945 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07004946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04004947 "0399 Invalid link attention type: x%x\n",
4948 bf_get(lpfc_acqe_link_status, acqe_link));
James Smart76a95d72010-11-20 23:11:48 -05004949 att_type = LPFC_ATT_RESERVED;
James Smartda0436e2009-05-22 14:51:39 -04004950 break;
4951 }
4952 return att_type;
4953}
4954
4955/**
James Smart8b68cd52012-09-29 11:32:37 -04004956 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
4957 * @phba: pointer to lpfc hba data structure.
4958 *
4959 * This routine is to get an SLI3 FC port's link speed in Mbps.
4960 *
4961 * Return: link speed in terms of Mbps.
4962 **/
4963uint32_t
4964lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4965{
4966 uint32_t link_speed;
4967
4968 if (!lpfc_is_link_up(phba))
4969 return 0;
4970
James Smarta085e872015-12-16 18:12:02 -05004971 if (phba->sli_rev <= LPFC_SLI_REV3) {
4972 switch (phba->fc_linkspeed) {
4973 case LPFC_LINK_SPEED_1GHZ:
4974 link_speed = 1000;
4975 break;
4976 case LPFC_LINK_SPEED_2GHZ:
4977 link_speed = 2000;
4978 break;
4979 case LPFC_LINK_SPEED_4GHZ:
4980 link_speed = 4000;
4981 break;
4982 case LPFC_LINK_SPEED_8GHZ:
4983 link_speed = 8000;
4984 break;
4985 case LPFC_LINK_SPEED_10GHZ:
4986 link_speed = 10000;
4987 break;
4988 case LPFC_LINK_SPEED_16GHZ:
4989 link_speed = 16000;
4990 break;
4991 default:
4992 link_speed = 0;
4993 }
4994 } else {
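		/* SLI4: prefer the reported logical link speed; fall back
		 * to the physical link speed when no logical speed is
		 * available.
		 */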
4995 if (phba->sli4_hba.link_state.logical_speed)
4996 link_speed =
4997 phba->sli4_hba.link_state.logical_speed;
4998 else
4999 link_speed = phba->sli4_hba.link_state.speed;
James Smart8b68cd52012-09-29 11:32:37 -04005000 }
5001 return link_speed;
5002}
5003
5004/**
5005 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5006 * @phba: pointer to lpfc hba data structure.
5007 * @evt_code: asynchronous event code.
5008 * @speed_code: asynchronous event link speed code.
5009 *
5010 * This routine is to parse the given SLI4 async event link speed code into
5011 * value of Mbps for the link speed.
5012 *
5013 * Return: link speed in terms of Mbps.
5014 **/
5015static uint32_t
5016lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5017 uint8_t speed_code)
5018{
5019 uint32_t port_speed;
5020
5021 switch (evt_code) {
5022 case LPFC_TRAILER_CODE_LINK:
5023 switch (speed_code) {
James Smart26d830e2015-04-07 15:07:17 -04005024 case LPFC_ASYNC_LINK_SPEED_ZERO:
James Smart8b68cd52012-09-29 11:32:37 -04005025 port_speed = 0;
5026 break;
James Smart26d830e2015-04-07 15:07:17 -04005027 case LPFC_ASYNC_LINK_SPEED_10MBPS:
James Smart8b68cd52012-09-29 11:32:37 -04005028 port_speed = 10;
5029 break;
James Smart26d830e2015-04-07 15:07:17 -04005030 case LPFC_ASYNC_LINK_SPEED_100MBPS:
James Smart8b68cd52012-09-29 11:32:37 -04005031 port_speed = 100;
5032 break;
James Smart26d830e2015-04-07 15:07:17 -04005033 case LPFC_ASYNC_LINK_SPEED_1GBPS:
James Smart8b68cd52012-09-29 11:32:37 -04005034 port_speed = 1000;
5035 break;
James Smart26d830e2015-04-07 15:07:17 -04005036 case LPFC_ASYNC_LINK_SPEED_10GBPS:
James Smart8b68cd52012-09-29 11:32:37 -04005037 port_speed = 10000;
5038 break;
James Smart26d830e2015-04-07 15:07:17 -04005039 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5040 port_speed = 20000;
5041 break;
5042 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5043 port_speed = 25000;
5044 break;
5045 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5046 port_speed = 40000;
5047 break;
Dick Kennedya1e4d3d2020-08-03 14:02:22 -07005048 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5049 port_speed = 100000;
5050 break;
James Smart8b68cd52012-09-29 11:32:37 -04005051 default:
5052 port_speed = 0;
5053 }
5054 break;
5055 case LPFC_TRAILER_CODE_FC:
5056 switch (speed_code) {
James Smart26d830e2015-04-07 15:07:17 -04005057 case LPFC_FC_LA_SPEED_UNKNOWN:
James Smart8b68cd52012-09-29 11:32:37 -04005058 port_speed = 0;
5059 break;
James Smart26d830e2015-04-07 15:07:17 -04005060 case LPFC_FC_LA_SPEED_1G:
James Smart8b68cd52012-09-29 11:32:37 -04005061 port_speed = 1000;
5062 break;
James Smart26d830e2015-04-07 15:07:17 -04005063 case LPFC_FC_LA_SPEED_2G:
James Smart8b68cd52012-09-29 11:32:37 -04005064 port_speed = 2000;
5065 break;
James Smart26d830e2015-04-07 15:07:17 -04005066 case LPFC_FC_LA_SPEED_4G:
James Smart8b68cd52012-09-29 11:32:37 -04005067 port_speed = 4000;
5068 break;
James Smart26d830e2015-04-07 15:07:17 -04005069 case LPFC_FC_LA_SPEED_8G:
James Smart8b68cd52012-09-29 11:32:37 -04005070 port_speed = 8000;
5071 break;
James Smart26d830e2015-04-07 15:07:17 -04005072 case LPFC_FC_LA_SPEED_10G:
James Smart8b68cd52012-09-29 11:32:37 -04005073 port_speed = 10000;
5074 break;
James Smart26d830e2015-04-07 15:07:17 -04005075 case LPFC_FC_LA_SPEED_16G:
James Smart8b68cd52012-09-29 11:32:37 -04005076 port_speed = 16000;
5077 break;
James Smartd38dd522015-08-31 16:48:17 -04005078 case LPFC_FC_LA_SPEED_32G:
5079 port_speed = 32000;
5080 break;
James Smartfbd8a6b2018-02-22 08:18:45 -08005081 case LPFC_FC_LA_SPEED_64G:
5082 port_speed = 64000;
5083 break;
James Smart1dc5ec22018-10-23 13:41:11 -07005084 case LPFC_FC_LA_SPEED_128G:
5085 port_speed = 128000;
5086 break;
James Smart8b68cd52012-09-29 11:32:37 -04005087 default:
5088 port_speed = 0;
5089 }
5090 break;
5091 default:
5092 port_speed = 0;
5093 }
5094 return port_speed;
5095}
5096
5097/**
James Smart70f3c072010-12-15 17:57:33 -05005098 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
James Smartda0436e2009-05-22 14:51:39 -04005099 * @phba: pointer to lpfc hba data structure.
5100 * @acqe_link: pointer to the async link completion queue entry.
5101 *
James Smart70f3c072010-12-15 17:57:33 -05005102 * This routine is to handle the SLI4 asynchronous FCoE link event.
James Smartda0436e2009-05-22 14:51:39 -04005103 **/
5104static void
5105lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5106 struct lpfc_acqe_link *acqe_link)
5107{
5108 struct lpfc_dmabuf *mp;
5109 LPFC_MBOXQ_t *pmb;
5110 MAILBOX_t *mb;
James Smart76a95d72010-11-20 23:11:48 -05005111 struct lpfc_mbx_read_top *la;
James Smartda0436e2009-05-22 14:51:39 -04005112 uint8_t att_type;
James Smart76a95d72010-11-20 23:11:48 -05005113 int rc;
James Smartda0436e2009-05-22 14:51:39 -04005114
5115 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
James Smart76a95d72010-11-20 23:11:48 -05005116 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
James Smartda0436e2009-05-22 14:51:39 -04005117 return;
James Smart32b97932009-07-19 10:01:21 -04005118 phba->fcoe_eventtag = acqe_link->event_tag;
James Smartda0436e2009-05-22 14:51:39 -04005119 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5120 if (!pmb) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04005122 "0395 The mboxq allocation failed\n");
5123 return;
5124 }
5125 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5126 if (!mp) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04005128 "0396 The lpfc_dmabuf allocation failed\n");
5129 goto out_free_pmb;
5130 }
5131 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5132 if (!mp->virt) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04005134 "0397 The mbuf allocation failed\n");
5135 goto out_free_dmabuf;
5136 }
5137
5138 /* Cleanup any outstanding ELS commands */
5139 lpfc_els_flush_all_cmd(phba);
5140
5141 /* Block ELS IOCBs until we have done process link event */
James Smart895427b2017-02-12 13:52:30 -08005142 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
James Smartda0436e2009-05-22 14:51:39 -04005143
5144 /* Update link event statistics */
5145 phba->sli.slistat.link_event++;
5146
James Smart76a95d72010-11-20 23:11:48 -05005147 /* Create lpfc_handle_latt mailbox command from link ACQE */
5148 lpfc_read_topology(phba, pmb, mp);
5149 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
James Smartda0436e2009-05-22 14:51:39 -04005150 pmb->vport = phba->pport;
5151
James Smartda0436e2009-05-22 14:51:39 -04005152 /* Keep the link status for extra SLI4 state machine reference */
5153 phba->sli4_hba.link_state.speed =
James Smart8b68cd52012-09-29 11:32:37 -04005154 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5155 bf_get(lpfc_acqe_link_speed, acqe_link));
James Smartda0436e2009-05-22 14:51:39 -04005156 phba->sli4_hba.link_state.duplex =
5157 bf_get(lpfc_acqe_link_duplex, acqe_link);
5158 phba->sli4_hba.link_state.status =
5159 bf_get(lpfc_acqe_link_status, acqe_link);
James Smart70f3c072010-12-15 17:57:33 -05005160 phba->sli4_hba.link_state.type =
5161 bf_get(lpfc_acqe_link_type, acqe_link);
5162 phba->sli4_hba.link_state.number =
5163 bf_get(lpfc_acqe_link_number, acqe_link);
James Smartda0436e2009-05-22 14:51:39 -04005164 phba->sli4_hba.link_state.fault =
5165 bf_get(lpfc_acqe_link_fault, acqe_link);
James Smart65467b62010-01-26 23:08:29 -05005166 phba->sli4_hba.link_state.logical_speed =
James Smart8b68cd52012-09-29 11:32:37 -04005167 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5168
James Smart70f3c072010-12-15 17:57:33 -05005169 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smartc31098c2011-04-16 11:03:33 -04005170 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5171 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5172 "Logical speed:%dMbps Fault:%d\n",
James Smart70f3c072010-12-15 17:57:33 -05005173 phba->sli4_hba.link_state.speed,
5174 phba->sli4_hba.link_state.topology,
5175 phba->sli4_hba.link_state.status,
5176 phba->sli4_hba.link_state.type,
5177 phba->sli4_hba.link_state.number,
James Smart8b68cd52012-09-29 11:32:37 -04005178 phba->sli4_hba.link_state.logical_speed,
James Smart70f3c072010-12-15 17:57:33 -05005179 phba->sli4_hba.link_state.fault);
James Smart76a95d72010-11-20 23:11:48 -05005180 /*
5181 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5182 * topology info. Note: Optional for non FC-AL ports.
5183 */
5184 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5185 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5186 if (rc == MBX_NOT_FINISHED)
5187 goto out_free_dmabuf;
5188 return;
5189 }
5190 /*
5191 * For FCoE Mode: fill in all the topology information we need and call
5192 * the READ_TOPOLOGY completion routine to continue without actually
5193 * sending the READ_TOPOLOGY mailbox command to the port.
5194 */
James Smart23288b72018-05-04 20:37:53 -07005195 /* Initialize completion status */
James Smart76a95d72010-11-20 23:11:48 -05005196 mb = &pmb->u.mb;
James Smart23288b72018-05-04 20:37:53 -07005197 mb->mbxStatus = MBX_SUCCESS;
5198
5199 /* Parse port fault information field */
5200 lpfc_sli4_parse_latt_fault(phba, acqe_link);
James Smart76a95d72010-11-20 23:11:48 -05005201
5202 /* Parse and translate link attention fields */
5203 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5204 la->eventTag = acqe_link->event_tag;
5205 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5206 bf_set(lpfc_mbx_read_top_link_spd, la,
James Smarta085e872015-12-16 18:12:02 -05005207 (bf_get(lpfc_acqe_link_speed, acqe_link)));
James Smart76a95d72010-11-20 23:11:48 -05005208
5209 /* Fake the the following irrelvant fields */
5210 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5211 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5212 bf_set(lpfc_mbx_read_top_il, la, 0);
5213 bf_set(lpfc_mbx_read_top_pb, la, 0);
5214 bf_set(lpfc_mbx_read_top_fa, la, 0);
5215 bf_set(lpfc_mbx_read_top_mm, la, 0);
James Smartda0436e2009-05-22 14:51:39 -04005216
5217 /* Invoke the lpfc_handle_latt mailbox command callback function */
James Smart76a95d72010-11-20 23:11:48 -05005218 lpfc_mbx_cmpl_read_topology(phba, pmb);
James Smartda0436e2009-05-22 14:51:39 -04005219
5220 return;
5221
5222out_free_dmabuf:
5223 kfree(mp);
5224out_free_pmb:
5225 mempool_free(pmb, phba->mbox_mem_pool);
5226}
5227
5228/**
James Smart1dc5ec22018-10-23 13:41:11 -07005229 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5230 * topology.
5231 * @phba: pointer to lpfc hba data structure.
James Smart1dc5ec22018-10-23 13:41:11 -07005232 * @speed_code: asynchronous event link speed code.
5233 *
5234 * This routine is to parse the given SLI4 async event link speed code into
5235 * value of Read topology link speed.
5236 *
5237 * Return: link speed in terms of Read topology.
5238 **/
5239static uint8_t
5240lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5241{
5242 uint8_t port_speed;
5243
5244 switch (speed_code) {
5245 case LPFC_FC_LA_SPEED_1G:
5246 port_speed = LPFC_LINK_SPEED_1GHZ;
5247 break;
5248 case LPFC_FC_LA_SPEED_2G:
5249 port_speed = LPFC_LINK_SPEED_2GHZ;
5250 break;
5251 case LPFC_FC_LA_SPEED_4G:
5252 port_speed = LPFC_LINK_SPEED_4GHZ;
5253 break;
5254 case LPFC_FC_LA_SPEED_8G:
5255 port_speed = LPFC_LINK_SPEED_8GHZ;
5256 break;
5257 case LPFC_FC_LA_SPEED_16G:
5258 port_speed = LPFC_LINK_SPEED_16GHZ;
5259 break;
5260 case LPFC_FC_LA_SPEED_32G:
5261 port_speed = LPFC_LINK_SPEED_32GHZ;
5262 break;
5263 case LPFC_FC_LA_SPEED_64G:
5264 port_speed = LPFC_LINK_SPEED_64GHZ;
5265 break;
5266 case LPFC_FC_LA_SPEED_128G:
5267 port_speed = LPFC_LINK_SPEED_128GHZ;
5268 break;
5269 case LPFC_FC_LA_SPEED_256G:
5270 port_speed = LPFC_LINK_SPEED_256GHZ;
5271 break;
5272 default:
5273 port_speed = 0;
5274 break;
5275 }
5276
5277 return port_speed;
5278}
5279
5280#define trunk_link_status(__idx)\
5281 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5282 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5283 "Link up" : "Link down") : "NA"
5284/* Did port __idx report an error? */
5285#define trunk_port_fault(__idx)\
5286 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5287 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
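/*
 * Note: these helpers are only valid inside lpfc_update_trunk_link_status(),
 * where phba, acqe_fc and port_fault are in scope.
 */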
5288
5289static void
5290lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5291 struct lpfc_acqe_fc_la *acqe_fc)
5292{
5293 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5294 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
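	/* port_fault is a bitmask, one bit per trunk port (bit N => port N
	 * faulted); err is the fault code shared by the faulted ports.
	 */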
5295
5296 phba->sli4_hba.link_state.speed =
5297 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5298 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5299
5300 phba->sli4_hba.link_state.logical_speed =
James Smartb8e6f132019-05-21 17:49:04 -07005301 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
James Smart1dc5ec22018-10-23 13:41:11 -07005302 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
5303 phba->fc_linkspeed =
5304 lpfc_async_link_speed_to_read_top(
5305 phba,
5306 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5307
5308 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5309 phba->trunk_link.link0.state =
5310 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5311 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
James Smart529b3dd2018-12-13 15:17:54 -08005312 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
James Smart1dc5ec22018-10-23 13:41:11 -07005313 }
5314 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5315 phba->trunk_link.link1.state =
5316 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5317 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
James Smart529b3dd2018-12-13 15:17:54 -08005318 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
James Smart1dc5ec22018-10-23 13:41:11 -07005319 }
5320 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5321 phba->trunk_link.link2.state =
5322 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5323 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
James Smart529b3dd2018-12-13 15:17:54 -08005324 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
James Smart1dc5ec22018-10-23 13:41:11 -07005325 }
5326 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5327 phba->trunk_link.link3.state =
5328 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5329 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
James Smart529b3dd2018-12-13 15:17:54 -08005330 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
James Smart1dc5ec22018-10-23 13:41:11 -07005331 }
5332
Dick Kennedy372c1872020-06-30 14:50:00 -07005333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1dc5ec22018-10-23 13:41:11 -07005334 "2910 Async FC Trunking Event - Speed:%d\n"
5335 "\tLogical speed:%d "
5336 "port0: %s port1: %s port2: %s port3: %s\n",
5337 phba->sli4_hba.link_state.speed,
5338 phba->sli4_hba.link_state.logical_speed,
5339 trunk_link_status(0), trunk_link_status(1),
5340 trunk_link_status(2), trunk_link_status(3));
5341
5342 if (port_fault)
Dick Kennedy372c1872020-06-30 14:50:00 -07005343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1dc5ec22018-10-23 13:41:11 -07005344 "3202 trunk error:0x%x (%s) seen on port0:%s "
5345 /*
5346				 * SLI-4: Only fault codes up to 0xA are
5347				 * defined as of now. Print an appropriate
5348				 * message in case the driver needs to be updated.
5349 */
5350 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5351 "UNDEFINED. update driver." : trunk_errmsg[err],
5352 trunk_port_fault(0), trunk_port_fault(1),
5353 trunk_port_fault(2), trunk_port_fault(3));
5354}
5355
5356
5357/**
James Smart70f3c072010-12-15 17:57:33 -05005358 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
5359 * @phba: pointer to lpfc hba data structure.
5360 * @acqe_fc: pointer to the async fc completion queue entry.
5361 *
5362 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
5363 * that the event was received and then issue a read_topology mailbox command so
5364 * that the rest of the driver will treat it the same as SLI3.
5365 **/
5366static void
5367lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5368{
5369 struct lpfc_dmabuf *mp;
5370 LPFC_MBOXQ_t *pmb;
James Smart7bdedb32016-07-06 12:36:00 -07005371 MAILBOX_t *mb;
5372 struct lpfc_mbx_read_top *la;
James Smart70f3c072010-12-15 17:57:33 -05005373 int rc;
5374
5375 if (bf_get(lpfc_trailer_type, acqe_fc) !=
5376 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005377 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart70f3c072010-12-15 17:57:33 -05005378 "2895 Non FC link Event detected.(%d)\n",
5379 bf_get(lpfc_trailer_type, acqe_fc));
5380 return;
5381 }
James Smart1dc5ec22018-10-23 13:41:11 -07005382
5383 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5384 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5385 lpfc_update_trunk_link_status(phba, acqe_fc);
5386 return;
5387 }
5388
James Smart70f3c072010-12-15 17:57:33 -05005389 /* Keep the link status for extra SLI4 state machine reference */
5390 phba->sli4_hba.link_state.speed =
James Smart8b68cd52012-09-29 11:32:37 -04005391 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5392 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
James Smart70f3c072010-12-15 17:57:33 -05005393 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5394 phba->sli4_hba.link_state.topology =
5395 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5396 phba->sli4_hba.link_state.status =
5397 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5398 phba->sli4_hba.link_state.type =
5399 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5400 phba->sli4_hba.link_state.number =
5401 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5402 phba->sli4_hba.link_state.fault =
5403 bf_get(lpfc_acqe_link_fault, acqe_fc);
James Smartb8e6f132019-05-21 17:49:04 -07005404
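	/* A link-down attention carries no logical speed; when trunking is
	 * configured, the logical speed set by the trunk event handler is
	 * left unchanged here.
	 */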
5405 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5406 LPFC_FC_LA_TYPE_LINK_DOWN)
5407 phba->sli4_hba.link_state.logical_speed = 0;
5408 else if (!phba->sli4_hba.conf_trunk)
5409 phba->sli4_hba.link_state.logical_speed =
James Smart8b68cd52012-09-29 11:32:37 -04005410 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
James Smartb8e6f132019-05-21 17:49:04 -07005411
James Smart70f3c072010-12-15 17:57:33 -05005412 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5413 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5414 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5415 "%dMbps Fault:%d\n",
5416 phba->sli4_hba.link_state.speed,
5417 phba->sli4_hba.link_state.topology,
5418 phba->sli4_hba.link_state.status,
5419 phba->sli4_hba.link_state.type,
5420 phba->sli4_hba.link_state.number,
James Smart8b68cd52012-09-29 11:32:37 -04005421 phba->sli4_hba.link_state.logical_speed,
James Smart70f3c072010-12-15 17:57:33 -05005422 phba->sli4_hba.link_state.fault);
5423 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5424 if (!pmb) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005425 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart70f3c072010-12-15 17:57:33 -05005426 "2897 The mboxq allocation failed\n");
5427 return;
5428 }
5429 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5430 if (!mp) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart70f3c072010-12-15 17:57:33 -05005432 "2898 The lpfc_dmabuf allocation failed\n");
5433 goto out_free_pmb;
5434 }
5435 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5436 if (!mp->virt) {
Dick Kennedy372c1872020-06-30 14:50:00 -07005437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart70f3c072010-12-15 17:57:33 -05005438 "2899 The mbuf allocation failed\n");
5439 goto out_free_dmabuf;
5440 }
5441
5442 /* Cleanup any outstanding ELS commands */
5443 lpfc_els_flush_all_cmd(phba);
5444
5445 /* Block ELS IOCBs until we have done process link event */
James Smart895427b2017-02-12 13:52:30 -08005446 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
James Smart70f3c072010-12-15 17:57:33 -05005447
5448 /* Update link event statistics */
5449 phba->sli.slistat.link_event++;
5450
5451 /* Create lpfc_handle_latt mailbox command from link ACQE */
5452 lpfc_read_topology(phba, pmb, mp);
5453 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5454 pmb->vport = phba->pport;
5455
James Smart7bdedb32016-07-06 12:36:00 -07005456 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
James Smartae9e28f2017-05-15 15:20:51 -07005457 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5458
5459 switch (phba->sli4_hba.link_state.status) {
5460 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5461 phba->link_flag |= LS_MDS_LINK_DOWN;
5462 break;
5463 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5464 phba->link_flag |= LS_MDS_LOOPBACK;
5465 break;
5466 default:
5467 break;
5468 }
5469
James Smart23288b72018-05-04 20:37:53 -07005470 /* Initialize completion status */
James Smart7bdedb32016-07-06 12:36:00 -07005471 mb = &pmb->u.mb;
James Smart23288b72018-05-04 20:37:53 -07005472 mb->mbxStatus = MBX_SUCCESS;
5473
5474 /* Parse port fault information field */
5475 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
James Smart7bdedb32016-07-06 12:36:00 -07005476
5477 /* Parse and translate link attention fields */
5478 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5479 la->eventTag = acqe_fc->event_tag;
James Smart7bdedb32016-07-06 12:36:00 -07005480
James Smartaeb3c812017-04-21 16:05:02 -07005481 if (phba->sli4_hba.link_state.status ==
5482 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5483 bf_set(lpfc_mbx_read_top_att_type, la,
5484 LPFC_FC_LA_TYPE_UNEXP_WWPN);
5485 } else {
5486 bf_set(lpfc_mbx_read_top_att_type, la,
5487 LPFC_FC_LA_TYPE_LINK_DOWN);
5488 }
James Smart7bdedb32016-07-06 12:36:00 -07005489 /* Invoke the mailbox command callback function */
5490 lpfc_mbx_cmpl_read_topology(phba, pmb);
5491
5492 return;
5493 }
5494
James Smart70f3c072010-12-15 17:57:33 -05005495 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5496 if (rc == MBX_NOT_FINISHED)
5497 goto out_free_dmabuf;
5498 return;
5499
5500out_free_dmabuf:
5501 kfree(mp);
5502out_free_pmb:
5503 mempool_free(pmb, phba->mbox_mem_pool);
5504}
5505
5506/**
5507 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
5508 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01005509 * @acqe_sli: pointer to the async SLI completion queue entry.
James Smart70f3c072010-12-15 17:57:33 -05005510 *
5511 * This routine is to handle the SLI4 asynchronous SLI events.
5512 **/
5513static void
5514lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
5515{
James Smart4b8bae02012-06-12 13:55:07 -04005516 char port_name;
James Smart8c1312e2012-10-31 14:45:09 -04005517 char message[128];
James Smart4b8bae02012-06-12 13:55:07 -04005518 uint8_t status;
James Smart946727d2015-04-07 15:07:09 -04005519 uint8_t evt_type;
James Smart448193b2015-12-16 18:12:05 -05005520 uint8_t operational = 0;
James Smart946727d2015-04-07 15:07:09 -04005521 struct temp_event temp_event_data;
James Smart4b8bae02012-06-12 13:55:07 -04005522 struct lpfc_acqe_misconfigured_event *misconfigured;
James Smart946727d2015-04-07 15:07:09 -04005523 struct Scsi_Host *shost;
James Smartcd713482018-10-23 13:41:01 -07005524 struct lpfc_vport **vports;
5525 int rc, i;
James Smart4b8bae02012-06-12 13:55:07 -04005526
James Smart946727d2015-04-07 15:07:09 -04005527 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
5528
James Smart448193b2015-12-16 18:12:05 -05005529 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smartd11ed162019-09-21 20:59:03 -07005530 "2901 Async SLI event - Type:%d, Event Data: x%08x "
5531 "x%08x x%08x x%08x\n", evt_type,
James Smart448193b2015-12-16 18:12:05 -05005532 acqe_sli->event_data1, acqe_sli->event_data2,
James Smartd11ed162019-09-21 20:59:03 -07005533 acqe_sli->reserved, acqe_sli->trailer);
James Smart4b8bae02012-06-12 13:55:07 -04005534
5535 port_name = phba->Port[0];
5536 if (port_name == 0x00)
5537 port_name = '?'; /* get port name is empty */
5538
James Smart946727d2015-04-07 15:07:09 -04005539 switch (evt_type) {
5540 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
5541 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5542 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
5543 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5544
5545 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5546 "3190 Over Temperature:%d Celsius- Port Name %c\n",
5547 acqe_sli->event_data1, port_name);
5548
James Smart310429e2016-07-06 12:35:54 -07005549 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
James Smart946727d2015-04-07 15:07:09 -04005550 shost = lpfc_shost_from_vport(phba->pport);
5551 fc_host_post_vendor_event(shost, fc_get_event_number(),
5552 sizeof(temp_event_data),
5553 (char *)&temp_event_data,
5554 SCSI_NL_VID_TYPE_PCI
5555 | PCI_VENDOR_ID_EMULEX);
5556 break;
5557 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
5558 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5559 temp_event_data.event_code = LPFC_NORMAL_TEMP;
5560 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5561
5562 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5563 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
5564 acqe_sli->event_data1, port_name);
5565
5566 shost = lpfc_shost_from_vport(phba->pport);
5567 fc_host_post_vendor_event(shost, fc_get_event_number(),
5568 sizeof(temp_event_data),
5569 (char *)&temp_event_data,
5570 SCSI_NL_VID_TYPE_PCI
5571 | PCI_VENDOR_ID_EMULEX);
5572 break;
5573 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
5574 misconfigured = (struct lpfc_acqe_misconfigured_event *)
James Smart4b8bae02012-06-12 13:55:07 -04005575 &acqe_sli->event_data1;
5576
James Smart946727d2015-04-07 15:07:09 -04005577 /* fetch the status for this port */
5578 switch (phba->sli4_hba.lnk_info.lnk_no) {
5579 case LPFC_LINK_NUMBER_0:
James Smart448193b2015-12-16 18:12:05 -05005580 status = bf_get(lpfc_sli_misconfigured_port0_state,
5581 &misconfigured->theEvent);
5582 operational = bf_get(lpfc_sli_misconfigured_port0_op,
James Smart4b8bae02012-06-12 13:55:07 -04005583 &misconfigured->theEvent);
James Smart946727d2015-04-07 15:07:09 -04005584 break;
5585 case LPFC_LINK_NUMBER_1:
James Smart448193b2015-12-16 18:12:05 -05005586 status = bf_get(lpfc_sli_misconfigured_port1_state,
5587 &misconfigured->theEvent);
5588 operational = bf_get(lpfc_sli_misconfigured_port1_op,
James Smart4b8bae02012-06-12 13:55:07 -04005589 &misconfigured->theEvent);
James Smart946727d2015-04-07 15:07:09 -04005590 break;
5591 case LPFC_LINK_NUMBER_2:
James Smart448193b2015-12-16 18:12:05 -05005592 status = bf_get(lpfc_sli_misconfigured_port2_state,
5593 &misconfigured->theEvent);
5594 operational = bf_get(lpfc_sli_misconfigured_port2_op,
James Smart4b8bae02012-06-12 13:55:07 -04005595 &misconfigured->theEvent);
James Smart946727d2015-04-07 15:07:09 -04005596 break;
5597 case LPFC_LINK_NUMBER_3:
James Smart448193b2015-12-16 18:12:05 -05005598 status = bf_get(lpfc_sli_misconfigured_port3_state,
5599 &misconfigured->theEvent);
5600 operational = bf_get(lpfc_sli_misconfigured_port3_op,
James Smart4b8bae02012-06-12 13:55:07 -04005601 &misconfigured->theEvent);
James Smart946727d2015-04-07 15:07:09 -04005602 break;
5603 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07005604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart448193b2015-12-16 18:12:05 -05005605 "3296 "
5606 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
5607 "event: Invalid link %d",
5608 phba->sli4_hba.lnk_info.lnk_no);
5609 return;
James Smart946727d2015-04-07 15:07:09 -04005610 }
James Smart4b8bae02012-06-12 13:55:07 -04005611
James Smart448193b2015-12-16 18:12:05 -05005612 /* Skip if optic state unchanged */
5613 if (phba->sli4_hba.lnk_info.optic_state == status)
5614 return;
5615
James Smart946727d2015-04-07 15:07:09 -04005616 switch (status) {
5617 case LPFC_SLI_EVENT_STATUS_VALID:
James Smart448193b2015-12-16 18:12:05 -05005618 sprintf(message, "Physical Link is functional");
5619 break;
James Smart946727d2015-04-07 15:07:09 -04005620 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
5621 sprintf(message, "Optics faulted/incorrectly "
5622 "installed/not installed - Reseat optics, "
5623 "if issue not resolved, replace.");
5624 break;
5625 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
5626 sprintf(message,
5627 "Optics of two types installed - Remove one "
5628 "optic or install matching pair of optics.");
5629 break;
5630 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
5631 sprintf(message, "Incompatible optics - Replace with "
James Smart292098b2012-09-29 11:31:41 -04005632 "compatible optics for card to function.");
James Smart946727d2015-04-07 15:07:09 -04005633 break;
James Smart448193b2015-12-16 18:12:05 -05005634 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
5635 sprintf(message, "Unqualified optics - Replace with "
5636 "Avago optics for Warranty and Technical "
5637 "Support - Link is%s operational",
James Smart2ea259e2017-02-12 13:52:27 -08005638 (operational) ? " not" : "");
James Smart448193b2015-12-16 18:12:05 -05005639 break;
5640 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
5641 sprintf(message, "Uncertified optics - Replace with "
5642 "Avago-certified optics to enable link "
5643 "operation - Link is%s operational",
James Smart2ea259e2017-02-12 13:52:27 -08005644 (operational) ? " not" : "");
James Smart448193b2015-12-16 18:12:05 -05005645 break;
James Smart946727d2015-04-07 15:07:09 -04005646 default:
5647 /* firmware is reporting a status we don't know about */
5648 sprintf(message, "Unknown event status x%02x", status);
5649 break;
5650 }
James Smartcd713482018-10-23 13:41:01 -07005651
5652 /* Issue READ_CONFIG mbox command to refresh supported speeds */
5653 rc = lpfc_sli4_read_config(phba);
James Smart3952e912018-10-23 13:41:02 -07005654 if (rc) {
James Smartcd713482018-10-23 13:41:01 -07005655 phba->lmt = 0;
Dick Kennedy372c1872020-06-30 14:50:00 -07005656 lpfc_printf_log(phba, KERN_ERR,
5657 LOG_TRACE_EVENT,
James Smartcd713482018-10-23 13:41:01 -07005658 "3194 Unable to retrieve supported "
James Smart3952e912018-10-23 13:41:02 -07005659 "speeds, rc = 0x%x\n", rc);
James Smartcd713482018-10-23 13:41:01 -07005660 }
5661 vports = lpfc_create_vport_work_array(phba);
5662 if (vports != NULL) {
5663 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5664 i++) {
5665 shost = lpfc_shost_from_vport(vports[i]);
5666 lpfc_host_supported_speeds_set(shost);
5667 }
5668 }
5669 lpfc_destroy_vport_work_array(phba, vports);
5670
James Smart448193b2015-12-16 18:12:05 -05005671 phba->sli4_hba.lnk_info.optic_state = status;
James Smart946727d2015-04-07 15:07:09 -04005672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart448193b2015-12-16 18:12:05 -05005673 "3176 Port Name %c %s\n", port_name, message);
James Smart946727d2015-04-07 15:07:09 -04005674 break;
5675 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
5676 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5677 "3192 Remote DPort Test Initiated - "
5678 "Event Data1:x%08x Event Data2: x%08x\n",
5679 acqe_sli->event_data1, acqe_sli->event_data2);
James Smart4b8bae02012-06-12 13:55:07 -04005680 break;
James Smarte7d85952019-10-18 14:18:29 -07005681 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
5682 /* Misconfigured WWN. Reports that the SLI Port is configured
5683	 * to use FA-WWN, but the attached device doesn't support it.
5684 * No driver action is required.
5685 * Event Data1 - N.A, Event Data2 - N.A
5686 */
5687 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
5688 "2699 Misconfigured FA-WWN - Attached device does "
5689 "not support FA-WWN\n");
5690 break;
James Smartd11ed162019-09-21 20:59:03 -07005691 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
5692 /* EEPROM failure. No driver action is required */
5693 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5694 "2518 EEPROM failure - "
5695 "Event Data1: x%08x Event Data2: x%08x\n",
5696 acqe_sli->event_data1, acqe_sli->event_data2);
5697 break;
James Smart4b8bae02012-06-12 13:55:07 -04005698 default:
James Smart946727d2015-04-07 15:07:09 -04005699 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smartd11ed162019-09-21 20:59:03 -07005700 "3193 Unrecognized SLI event, type: 0x%x",
James Smart946727d2015-04-07 15:07:09 -04005701 evt_type);
James Smart4b8bae02012-06-12 13:55:07 -04005702 break;
5703 }
James Smart70f3c072010-12-15 17:57:33 -05005704}
5705
5706/**
James Smartfc2b9892010-02-26 14:15:29 -05005707 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
5708 * @vport: pointer to vport data structure.
5709 *
5710 * This routine is to perform Clear Virtual Link (CVL) on a vport in
5711 * response to a CVL event.
5712 *
5713 * Return the pointer to the ndlp with the vport if successful, otherwise
5714 * return NULL.
5715 **/
5716static struct lpfc_nodelist *
5717lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5718{
5719 struct lpfc_nodelist *ndlp;
5720 struct Scsi_Host *shost;
5721 struct lpfc_hba *phba;
5722
5723 if (!vport)
5724 return NULL;
James Smartfc2b9892010-02-26 14:15:29 -05005725 phba = vport->phba;
5726 if (!phba)
5727 return NULL;
James Smart78730cf2010-04-06 15:06:30 -04005728 ndlp = lpfc_findnode_did(vport, Fabric_DID);
5729 if (!ndlp) {
5730 /* Cannot find existing Fabric ndlp, so allocate a new one */
James Smart9d3d3402017-04-21 16:05:00 -07005731 ndlp = lpfc_nlp_init(vport, Fabric_DID);
James Smart78730cf2010-04-06 15:06:30 -04005732 if (!ndlp)
5733		return NULL;
James Smart78730cf2010-04-06 15:06:30 -04005734 /* Set the node type */
5735 ndlp->nlp_type |= NLP_FABRIC;
5736 /* Put ndlp onto node list */
5737 lpfc_enqueue_node(vport, ndlp);
James Smart78730cf2010-04-06 15:06:30 -04005738 }
James Smart63e801c2010-11-20 23:14:19 -05005739 if ((phba->pport->port_state < LPFC_FLOGI) &&
5740 (phba->pport->port_state != LPFC_VPORT_FAILED))
James Smartfc2b9892010-02-26 14:15:29 -05005741 return NULL;
5742 /* If virtual link is not yet instantiated ignore CVL */
James Smart63e801c2010-11-20 23:14:19 -05005743 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5744 && (vport->port_state != LPFC_VPORT_FAILED))
James Smartfc2b9892010-02-26 14:15:29 -05005745 return NULL;
5746 shost = lpfc_shost_from_vport(vport);
5747 if (!shost)
5748 return NULL;
5749 lpfc_linkdown_port(vport);
5750 lpfc_cleanup_pending_mbox(vport);
5751 spin_lock_irq(shost->host_lock);
5752 vport->fc_flag |= FC_VPORT_CVL_RCVD;
5753 spin_unlock_irq(shost->host_lock);
5754
5755 return ndlp;
5756}
5757
5758/**
5759 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
Lee Jonesfe614ac2020-07-23 13:24:22 +01005760 * @phba: pointer to lpfc hba data structure.
James Smartfc2b9892010-02-26 14:15:29 -05005761 *
5762 * This routine is to perform Clear Virtual Link (CVL) on all vports in
5763 * response to a FCF dead event.
5764 **/
5765static void
5766lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5767{
5768 struct lpfc_vport **vports;
5769 int i;
5770
5771 vports = lpfc_create_vport_work_array(phba);
5772 if (vports)
5773 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5774 lpfc_sli4_perform_vport_cvl(vports[i]);
5775 lpfc_destroy_vport_work_array(phba, vports);
5776}
5777
5778/**
James Smart76a95d72010-11-20 23:11:48 -05005779 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
James Smartda0436e2009-05-22 14:51:39 -04005780 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01005781 * @acqe_fip: pointer to the async fcoe completion queue entry.
James Smartda0436e2009-05-22 14:51:39 -04005782 *
5783 * This routine is to handle the SLI4 asynchronous fcoe event.
5784 **/
5785static void
James Smart76a95d72010-11-20 23:11:48 -05005786lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
James Smart70f3c072010-12-15 17:57:33 -05005787 struct lpfc_acqe_fip *acqe_fip)
James Smartda0436e2009-05-22 14:51:39 -04005788{
James Smart70f3c072010-12-15 17:57:33 -05005789 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
James Smartda0436e2009-05-22 14:51:39 -04005790 int rc;
James Smart6669f9b2009-10-02 15:16:45 -04005791 struct lpfc_vport *vport;
5792 struct lpfc_nodelist *ndlp;
James Smart695a8142010-01-26 23:08:03 -05005793 int active_vlink_present;
5794 struct lpfc_vport **vports;
5795 int i;
James Smartda0436e2009-05-22 14:51:39 -04005796
James Smart70f3c072010-12-15 17:57:33 -05005797 phba->fc_eventTag = acqe_fip->event_tag;
5798 phba->fcoe_eventtag = acqe_fip->event_tag;
James Smartda0436e2009-05-22 14:51:39 -04005799 switch (event_type) {
James Smart70f3c072010-12-15 17:57:33 -05005800 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5801 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5802 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
Dick Kennedy372c1872020-06-30 14:50:00 -07005803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smarta93ff372010-10-22 11:06:08 -04005804 "2546 New FCF event, evt_tag:x%x, "
5805 "index:x%x\n",
James Smart70f3c072010-12-15 17:57:33 -05005806 acqe_fip->event_tag,
5807 acqe_fip->index);
James Smart999d8132010-03-15 11:24:56 -04005808 else
5809 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5810 LOG_DISCOVERY,
James Smarta93ff372010-10-22 11:06:08 -04005811 "2788 FCF param modified event, "
5812 "evt_tag:x%x, index:x%x\n",
James Smart70f3c072010-12-15 17:57:33 -05005813 acqe_fip->event_tag,
5814 acqe_fip->index);
James Smart38b92ef2010-08-04 16:11:39 -04005815 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
James Smart0c9ab6f2010-02-26 14:15:57 -05005816 /*
5817 * During period of FCF discovery, read the FCF
5818 * table record indexed by the event to update
James Smarta93ff372010-10-22 11:06:08 -04005819 * FCF roundrobin failover eligible FCF bmask.
James Smart0c9ab6f2010-02-26 14:15:57 -05005820 */
5821 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5822 LOG_DISCOVERY,
James Smarta93ff372010-10-22 11:06:08 -04005823 "2779 Read FCF (x%x) for updating "
5824 "roundrobin FCF failover bmask\n",
James Smart70f3c072010-12-15 17:57:33 -05005825 acqe_fip->index);
5826 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
James Smart0c9ab6f2010-02-26 14:15:57 -05005827 }
James Smart38b92ef2010-08-04 16:11:39 -04005828
5829 /* If the FCF discovery is in progress, do nothing. */
James Smart3804dc82010-07-14 15:31:37 -04005830 spin_lock_irq(&phba->hbalock);
James Smarta93ff372010-10-22 11:06:08 -04005831 if (phba->hba_flag & FCF_TS_INPROG) {
James Smart38b92ef2010-08-04 16:11:39 -04005832 spin_unlock_irq(&phba->hbalock);
5833 break;
5834 }
5835 /* If fast FCF failover rescan event is pending, do nothing */
James Smart036cad12018-10-23 13:41:06 -07005836 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
James Smart38b92ef2010-08-04 16:11:39 -04005837 spin_unlock_irq(&phba->hbalock);
5838 break;
5839 }
5840
James Smartc2b97122013-05-31 17:05:36 -04005841 /* If the FCF has been in discovered state, do nothing. */
5842 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
James Smart3804dc82010-07-14 15:31:37 -04005843 spin_unlock_irq(&phba->hbalock);
5844 break;
5845 }
5846 spin_unlock_irq(&phba->hbalock);
James Smart38b92ef2010-08-04 16:11:39 -04005847
James Smart0c9ab6f2010-02-26 14:15:57 -05005848 /* Otherwise, scan the entire FCF table and re-discover SAN */
5849 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
James Smarta93ff372010-10-22 11:06:08 -04005850 "2770 Start FCF table scan per async FCF "
5851 "event, evt_tag:x%x, index:x%x\n",
James Smart70f3c072010-12-15 17:57:33 -05005852 acqe_fip->event_tag, acqe_fip->index);
James Smart0c9ab6f2010-02-26 14:15:57 -05005853 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5854 LPFC_FCOE_FCF_GET_FIRST);
James Smartda0436e2009-05-22 14:51:39 -04005855 if (rc)
Dick Kennedy372c1872020-06-30 14:50:00 -07005856 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0c9ab6f2010-02-26 14:15:57 -05005857 "2547 Issue FCF scan read FCF mailbox "
James Smarta93ff372010-10-22 11:06:08 -04005858 "command failed (x%x)\n", rc);
James Smartda0436e2009-05-22 14:51:39 -04005859 break;
5860
James Smart70f3c072010-12-15 17:57:33 -05005861 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
Dick Kennedy372c1872020-06-30 14:50:00 -07005862 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5863 "2548 FCF Table full count 0x%x tag 0x%x\n",
5864 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5865 acqe_fip->event_tag);
James Smartda0436e2009-05-22 14:51:39 -04005866 break;
5867
James Smart70f3c072010-12-15 17:57:33 -05005868 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
James Smart80c17842012-03-01 22:35:45 -05005869 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
Dick Kennedy372c1872020-06-30 14:50:00 -07005870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5871 "2549 FCF (x%x) disconnected from network, "
5872 "tag:x%x\n", acqe_fip->index,
5873 acqe_fip->event_tag);
James Smart38b92ef2010-08-04 16:11:39 -04005874 /*
5875 * If we are in the middle of FCF failover process, clear
5876 * the corresponding FCF bit in the roundrobin bitmap.
James Smartda0436e2009-05-22 14:51:39 -04005877 */
James Smartfc2b9892010-02-26 14:15:29 -05005878 spin_lock_irq(&phba->hbalock);
James Smarta1cadfe2016-07-06 12:36:02 -07005879 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5880 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
James Smartfc2b9892010-02-26 14:15:29 -05005881 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -05005882 /* Update FLOGI FCF failover eligible FCF bmask */
James Smart70f3c072010-12-15 17:57:33 -05005883 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
James Smartfc2b9892010-02-26 14:15:29 -05005884 break;
5885 }
James Smart38b92ef2010-08-04 16:11:39 -04005886 spin_unlock_irq(&phba->hbalock);
5887
5888 /* If the event is not for currently used fcf do nothing */
James Smart70f3c072010-12-15 17:57:33 -05005889 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
James Smart38b92ef2010-08-04 16:11:39 -04005890 break;
5891
5892 /*
5893 * Otherwise, request the port to rediscover the entire FCF
5894		 * table for a fast recovery from the case that the current FCF
5895		 * is no longer valid, as we are not already in the middle of
5896		 * the FCF failover process.
5897 */
James Smartc2b97122013-05-31 17:05:36 -04005898 spin_lock_irq(&phba->hbalock);
5899 /* Mark the fast failover process in progress */
5900 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5901 spin_unlock_irq(&phba->hbalock);
5902
5903 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5904 "2771 Start FCF fast failover process due to "
5905 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5906 "\n", acqe_fip->event_tag, acqe_fip->index);
5907 rc = lpfc_sli4_redisc_fcf_table(phba);
5908 if (rc) {
5909 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
Dick Kennedy372c1872020-06-30 14:50:00 -07005910 LOG_TRACE_EVENT,
Colin Ian King7afc0ce2018-05-03 10:26:12 +01005911 "2772 Issue FCF rediscover mailbox "
James Smartc2b97122013-05-31 17:05:36 -04005912 "command failed, fail through to FCF "
5913 "dead event\n");
5914 spin_lock_irq(&phba->hbalock);
5915 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5916 spin_unlock_irq(&phba->hbalock);
5917 /*
5918 * Last resort will fail over by treating this
5919 * as a link down to FCF registration.
5920 */
5921 lpfc_sli4_fcf_dead_failthrough(phba);
5922 } else {
5923 /* Reset FCF roundrobin bmask for new discovery */
5924 lpfc_sli4_clear_fcf_rr_bmask(phba);
5925 /*
5926 * Handling fast FCF failover to a DEAD FCF event is
5927			 * considered equivalent to receiving CVL on all vports.
5928 */
5929 lpfc_sli4_perform_all_vport_cvl(phba);
5930 }
James Smartda0436e2009-05-22 14:51:39 -04005931 break;
James Smart70f3c072010-12-15 17:57:33 -05005932 case LPFC_FIP_EVENT_TYPE_CVL:
James Smart80c17842012-03-01 22:35:45 -05005933 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
Dick Kennedy372c1872020-06-30 14:50:00 -07005934 lpfc_printf_log(phba, KERN_ERR,
5935 LOG_TRACE_EVENT,
James Smart6669f9b2009-10-02 15:16:45 -04005936 "2718 Clear Virtual Link Received for VPI 0x%x"
James Smart70f3c072010-12-15 17:57:33 -05005937 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
James Smart6d368e52011-05-24 11:44:12 -04005938
James Smart6669f9b2009-10-02 15:16:45 -04005939 vport = lpfc_find_vport_by_vpid(phba,
James Smart5248a742011-07-22 18:37:06 -04005940 acqe_fip->index);
James Smartfc2b9892010-02-26 14:15:29 -05005941 ndlp = lpfc_sli4_perform_vport_cvl(vport);
James Smart6669f9b2009-10-02 15:16:45 -04005942 if (!ndlp)
5943 break;
James Smart695a8142010-01-26 23:08:03 -05005944 active_vlink_present = 0;
5945
5946 vports = lpfc_create_vport_work_array(phba);
5947 if (vports) {
5948 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5949 i++) {
5950 if ((!(vports[i]->fc_flag &
5951 FC_VPORT_CVL_RCVD)) &&
5952 (vports[i]->port_state > LPFC_FDISC)) {
5953 active_vlink_present = 1;
5954 break;
5955 }
5956 }
5957 lpfc_destroy_vport_work_array(phba, vports);
5958 }
5959
James Smartcc823552015-05-21 13:55:26 -04005960 /*
5961 * Don't re-instantiate if vport is marked for deletion.
5962 * If we are here first then vport_delete is going to wait
5963 * for discovery to complete.
5964 */
5965 if (!(vport->load_flag & FC_UNLOADING) &&
5966 active_vlink_present) {
James Smart695a8142010-01-26 23:08:03 -05005967 /*
5968 * If there are other active VLinks present,
5969 * re-instantiate the Vlink using FDISC.
5970 */
James Smart256ec0d2013-04-17 20:14:58 -04005971 mod_timer(&ndlp->nlp_delayfunc,
5972 jiffies + msecs_to_jiffies(1000));
James Smartc6adba12020-11-15 11:26:34 -08005973 spin_lock_irq(&ndlp->lock);
James Smart6669f9b2009-10-02 15:16:45 -04005974 ndlp->nlp_flag |= NLP_DELAY_TMO;
James Smartc6adba12020-11-15 11:26:34 -08005975 spin_unlock_irq(&ndlp->lock);
James Smart695a8142010-01-26 23:08:03 -05005976 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5977 vport->port_state = LPFC_FDISC;
5978 } else {
James Smartecfd03c2010-02-12 14:41:27 -05005979 /*
5980			 * Otherwise, request the port to rediscover
5981			 * the entire FCF table for a fast recovery
5982			 * from the possible case that the current FCF
James Smart0c9ab6f2010-02-26 14:15:57 -05005983			 * is no longer valid, if we are not already
5984 * in the FCF failover process.
James Smartecfd03c2010-02-12 14:41:27 -05005985 */
James Smartfc2b9892010-02-26 14:15:29 -05005986 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -05005987 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
James Smartfc2b9892010-02-26 14:15:29 -05005988 spin_unlock_irq(&phba->hbalock);
5989 break;
5990 }
5991 /* Mark the fast failover process in progress */
James Smart0c9ab6f2010-02-26 14:15:57 -05005992 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
James Smartfc2b9892010-02-26 14:15:29 -05005993 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -05005994 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5995 LOG_DISCOVERY,
James Smarta93ff372010-10-22 11:06:08 -04005996 "2773 Start FCF failover per CVL, "
James Smart70f3c072010-12-15 17:57:33 -05005997 "evt_tag:x%x\n", acqe_fip->event_tag);
James Smartecfd03c2010-02-12 14:41:27 -05005998 rc = lpfc_sli4_redisc_fcf_table(phba);
James Smartfc2b9892010-02-26 14:15:29 -05005999 if (rc) {
James Smart0c9ab6f2010-02-26 14:15:57 -05006000 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
Dick Kennedy372c1872020-06-30 14:50:00 -07006001 LOG_TRACE_EVENT,
James Smart0c9ab6f2010-02-26 14:15:57 -05006002 "2774 Issue FCF rediscover "
Colin Ian King7afc0ce2018-05-03 10:26:12 +01006003 "mailbox command failed, "
James Smart0c9ab6f2010-02-26 14:15:57 -05006004 "through to CVL event\n");
James Smartfc2b9892010-02-26 14:15:29 -05006005 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -05006006 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
James Smartfc2b9892010-02-26 14:15:29 -05006007 spin_unlock_irq(&phba->hbalock);
James Smartecfd03c2010-02-12 14:41:27 -05006008 /*
6009				 * Last resort will be to retry on the
6010				 * currently registered FCF entry.
6011 */
6012 lpfc_retry_pport_discovery(phba);
James Smart38b92ef2010-08-04 16:11:39 -04006013 } else
6014 /*
6015 * Reset FCF roundrobin bmask for new
6016 * discovery.
6017 */
James Smart7d791df2011-07-22 18:37:52 -04006018 lpfc_sli4_clear_fcf_rr_bmask(phba);
James Smart6669f9b2009-10-02 15:16:45 -04006019 }
6020 break;
James Smartda0436e2009-05-22 14:51:39 -04006021 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07006022 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6023 "0288 Unknown FCoE event type 0x%x event tag "
6024 "0x%x\n", event_type, acqe_fip->event_tag);
James Smartda0436e2009-05-22 14:51:39 -04006025 break;
6026 }
6027}
6028
6029/**
6030 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6031 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01006032 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
James Smartda0436e2009-05-22 14:51:39 -04006033 *
6034 * This routine is to handle the SLI4 asynchronous dcbx event.
6035 **/
6036static void
6037lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6038 struct lpfc_acqe_dcbx *acqe_dcbx)
6039{
James Smart4d9ab992009-10-02 15:16:39 -04006040 phba->fc_eventTag = acqe_dcbx->event_tag;
Dick Kennedy372c1872020-06-30 14:50:00 -07006041 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04006042 "0290 The SLI4 DCBX asynchronous event is not "
6043 "handled yet\n");
6044}
6045
6046/**
James Smartb19a0612010-04-06 14:48:51 -04006047 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6048 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01006049 * @acqe_grp5: pointer to the async grp5 completion queue entry.
James Smartb19a0612010-04-06 14:48:51 -04006050 *
6051 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6052 * is an asynchronous notification of a logical link speed change. The Port
6053 * reports the logical link speed in units of 10Mbps.
6054 **/
6055static void
6056lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6057 struct lpfc_acqe_grp5 *acqe_grp5)
6058{
6059 uint16_t prev_ll_spd;
6060
6061 phba->fc_eventTag = acqe_grp5->event_tag;
6062 phba->fcoe_eventtag = acqe_grp5->event_tag;
6063 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6064 phba->sli4_hba.link_state.logical_speed =
James Smart8b68cd52012-09-29 11:32:37 -04006065 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
James Smartb19a0612010-04-06 14:48:51 -04006066 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6067 "2789 GRP5 Async Event: Updating logical link speed "
James Smart8b68cd52012-09-29 11:32:37 -04006068 "from %dMbps to %dMbps\n", prev_ll_spd,
6069 phba->sli4_hba.link_state.logical_speed);
James Smartb19a0612010-04-06 14:48:51 -04006070}
6071
6072/**
James Smartda0436e2009-05-22 14:51:39 -04006073 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
6074 * @phba: pointer to lpfc hba data structure.
6075 *
6076 * This routine is invoked by the worker thread to process all the pending
6077 * SLI4 asynchronous events.
6078 **/
6079void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
6080{
6081 struct lpfc_cq_event *cq_event;
James Smarte7dab162020-10-20 13:27:12 -07006082 unsigned long iflags;
James Smartda0436e2009-05-22 14:51:39 -04006083
6084 /* First, declare the async event has been handled */
James Smarte7dab162020-10-20 13:27:12 -07006085 spin_lock_irqsave(&phba->hbalock, iflags);
James Smartda0436e2009-05-22 14:51:39 -04006086 phba->hba_flag &= ~ASYNC_EVENT;
James Smarte7dab162020-10-20 13:27:12 -07006087 spin_unlock_irqrestore(&phba->hbalock, iflags);
6088
James Smartda0436e2009-05-22 14:51:39 -04006089 /* Now, handle all the async events */
James Smarte7dab162020-10-20 13:27:12 -07006090 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
James Smartda0436e2009-05-22 14:51:39 -04006091 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
James Smartda0436e2009-05-22 14:51:39 -04006092 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
6093 cq_event, struct lpfc_cq_event, list);
James Smarte7dab162020-10-20 13:27:12 -07006094 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
6095 iflags);
6096
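		/*
		 * The asynce_list_lock is dropped here so that the per-event
		 * handlers below may block or take other locks; it is
		 * re-acquired once the completion event has been released
		 * back to the free pool.
		 */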
James Smartda0436e2009-05-22 14:51:39 -04006097 /* Process the asynchronous event */
6098 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
6099 case LPFC_TRAILER_CODE_LINK:
6100 lpfc_sli4_async_link_evt(phba,
6101 &cq_event->cqe.acqe_link);
6102 break;
6103 case LPFC_TRAILER_CODE_FCOE:
James Smart70f3c072010-12-15 17:57:33 -05006104 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
James Smartda0436e2009-05-22 14:51:39 -04006105 break;
6106 case LPFC_TRAILER_CODE_DCBX:
6107 lpfc_sli4_async_dcbx_evt(phba,
6108 &cq_event->cqe.acqe_dcbx);
6109 break;
James Smartb19a0612010-04-06 14:48:51 -04006110 case LPFC_TRAILER_CODE_GRP5:
6111 lpfc_sli4_async_grp5_evt(phba,
6112 &cq_event->cqe.acqe_grp5);
6113 break;
James Smart70f3c072010-12-15 17:57:33 -05006114 case LPFC_TRAILER_CODE_FC:
6115 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
6116 break;
6117 case LPFC_TRAILER_CODE_SLI:
6118 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
6119 break;
James Smartda0436e2009-05-22 14:51:39 -04006120 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07006121 lpfc_printf_log(phba, KERN_ERR,
6122 LOG_TRACE_EVENT,
Colin Ian King291c2542019-12-18 08:43:01 +00006123 "1804 Invalid asynchronous event code: "
James Smartda0436e2009-05-22 14:51:39 -04006124 "x%x\n", bf_get(lpfc_trailer_code,
6125 &cq_event->cqe.mcqe_cmpl));
6126 break;
6127 }
James Smarte7dab162020-10-20 13:27:12 -07006128
James Smartda0436e2009-05-22 14:51:39 -04006129 /* Free the completion event processed to the free pool */
6130 lpfc_sli4_cq_event_release(phba, cq_event);
James Smarte7dab162020-10-20 13:27:12 -07006131 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
James Smartda0436e2009-05-22 14:51:39 -04006132 }
James Smarte7dab162020-10-20 13:27:12 -07006133 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
James Smartda0436e2009-05-22 14:51:39 -04006134}
6135
6136/**
James Smartecfd03c2010-02-12 14:41:27 -05006137 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
6138 * @phba: pointer to lpfc hba data structure.
6139 *
6140 * This routine is invoked by the worker thread to process FCF table
6141 * rediscovery pending completion event.
6142 **/
6143void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
6144{
6145 int rc;
6146
6147 spin_lock_irq(&phba->hbalock);
6148 /* Clear FCF rediscovery timeout event */
6149 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
6150 /* Clear driver fast failover FCF record flag */
6151 phba->fcf.failover_rec.flag = 0;
6152 /* Set state for FCF fast failover */
6153 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
6154 spin_unlock_irq(&phba->hbalock);
6155
6156 /* Scan FCF table from the first entry to re-discover SAN */
James Smart0c9ab6f2010-02-26 14:15:57 -05006157 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
James Smarta93ff372010-10-22 11:06:08 -04006158 "2777 Start post-quiescent FCF table scan\n");
James Smart0c9ab6f2010-02-26 14:15:57 -05006159 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
James Smartecfd03c2010-02-12 14:41:27 -05006160 if (rc)
Dick Kennedy372c1872020-06-30 14:50:00 -07006161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0c9ab6f2010-02-26 14:15:57 -05006162 "2747 Issue FCF scan read FCF mailbox "
6163 "command failed 0x%x\n", rc);
James Smartecfd03c2010-02-12 14:41:27 -05006164}
6165
6166/**
James Smartda0436e2009-05-22 14:51:39 -04006167 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
6168 * @phba: pointer to lpfc hba data structure.
6169 * @dev_grp: The HBA PCI-Device group number.
6170 *
6171 * This routine is invoked to set up the per HBA PCI-Device group function
6172 * API jump table entries.
6173 *
6174 * Return: 0 if success, otherwise -ENODEV
6175 **/
6176int
6177lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
James Smart5b75da22008-12-04 22:39:35 -05006178{
6179 int rc;
6180
James Smartda0436e2009-05-22 14:51:39 -04006181 /* Set up lpfc PCI-device group */
6182 phba->pci_dev_grp = dev_grp;
James Smart5b75da22008-12-04 22:39:35 -05006183
James Smartda0436e2009-05-22 14:51:39 -04006184 /* The LPFC_PCI_DEV_OC uses SLI4 */
6185 if (dev_grp == LPFC_PCI_DEV_OC)
6186 phba->sli_rev = LPFC_SLI_REV4;
James Smart5b75da22008-12-04 22:39:35 -05006187
James Smartda0436e2009-05-22 14:51:39 -04006188 /* Set up device INIT API function jump table */
6189 rc = lpfc_init_api_table_setup(phba, dev_grp);
6190 if (rc)
6191 return -ENODEV;
6192 /* Set up SCSI API function jump table */
6193 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
6194 if (rc)
6195 return -ENODEV;
6196 /* Set up SLI API function jump table */
6197 rc = lpfc_sli_api_table_setup(phba, dev_grp);
6198 if (rc)
6199 return -ENODEV;
6200 /* Set up MBOX API function jump table */
6201 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
6202 if (rc)
6203 return -ENODEV;
James Smart5b75da22008-12-04 22:39:35 -05006204
James Smartda0436e2009-05-22 14:51:39 -04006205 return 0;
James Smart5b75da22008-12-04 22:39:35 -05006206}
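/*
 * Illustrative sketch only (the actual caller is the PCI probe path,
 * which is not part of this excerpt, and the error label below is
 * hypothetical):
 *
 *	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		goto out_free_phba;
 *
 * i.e. any non-zero return (-ENODEV) is expected to unwind the HBA
 * setup in the caller.
 */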
6207
6208/**
James Smart3621a712009-04-06 18:47:14 -04006209 * lpfc_log_intr_mode - Log the active interrupt mode
James Smart5b75da22008-12-04 22:39:35 -05006210 * @phba: pointer to lpfc hba data structure.
6211 * @intr_mode: active interrupt mode adopted.
6212 *
6213 * This routine is invoked to log the currently used active interrupt mode
6214 * to the device.
James Smart3772a992009-05-22 14:50:54 -04006215 **/
6216static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
James Smart5b75da22008-12-04 22:39:35 -05006217{
6218 switch (intr_mode) {
6219 case 0:
6220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6221 "0470 Enable INTx interrupt mode.\n");
6222 break;
6223 case 1:
6224 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6225 "0481 Enabled MSI interrupt mode.\n");
6226 break;
6227 case 2:
6228 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6229 "0480 Enabled MSI-X interrupt mode.\n");
6230 break;
6231 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07006232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart5b75da22008-12-04 22:39:35 -05006233 "0482 Illegal interrupt mode.\n");
6234 break;
6235 }
6236 return;
6237}
6238
James Smart5b75da22008-12-04 22:39:35 -05006239/**
James Smart3772a992009-05-22 14:50:54 -04006240 * lpfc_enable_pci_dev - Enable a generic PCI device.
James Smart5b75da22008-12-04 22:39:35 -05006241 * @phba: pointer to lpfc hba data structure.
6242 *
James Smart3772a992009-05-22 14:50:54 -04006243 * This routine is invoked to perform the PCI device enablement that is
6244 * common to all lpfc PCI devices.
James Smart5b75da22008-12-04 22:39:35 -05006245 *
6246 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02006247 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -04006248 * other values - error
James Smart5b75da22008-12-04 22:39:35 -05006249 **/
James Smart3772a992009-05-22 14:50:54 -04006250static int
6251lpfc_enable_pci_dev(struct lpfc_hba *phba)
James Smart5b75da22008-12-04 22:39:35 -05006252{
James Smart3772a992009-05-22 14:50:54 -04006253 struct pci_dev *pdev;
James Smart5b75da22008-12-04 22:39:35 -05006254
James Smart3772a992009-05-22 14:50:54 -04006255 /* Obtain PCI device reference */
6256 if (!phba->pcidev)
6257 goto out_error;
6258 else
6259 pdev = phba->pcidev;
James Smart3772a992009-05-22 14:50:54 -04006260 /* Enable PCI device */
6261 if (pci_enable_device_mem(pdev))
6262 goto out_error;
6263 /* Request PCI resource for the device */
Johannes Thumshirne0c04832016-06-07 09:44:03 +02006264 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
James Smart3772a992009-05-22 14:50:54 -04006265 goto out_disable_device;
6266 /* Set up device as PCI master and save state for EEH */
6267 pci_set_master(pdev);
6268 pci_try_set_mwi(pdev);
6269 pci_save_state(pdev);
James Smart5b75da22008-12-04 22:39:35 -05006270
James Smart05580562011-05-24 11:40:48 -04006271 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
Jon Mason453193e2013-09-11 15:38:13 -07006272 if (pci_is_pcie(pdev))
James Smart05580562011-05-24 11:40:48 -04006273 pdev->needs_freset = 1;
6274
James Smart3772a992009-05-22 14:50:54 -04006275 return 0;
James Smart5b75da22008-12-04 22:39:35 -05006276
James Smart3772a992009-05-22 14:50:54 -04006277out_disable_device:
6278 pci_disable_device(pdev);
6279out_error:
Dick Kennedy372c1872020-06-30 14:50:00 -07006280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Johannes Thumshirne0c04832016-06-07 09:44:03 +02006281 "1401 Failed to enable pci device\n");
James Smart3772a992009-05-22 14:50:54 -04006282 return -ENODEV;
James Smart5b75da22008-12-04 22:39:35 -05006283}
6284
6285/**
James Smart3772a992009-05-22 14:50:54 -04006286 * lpfc_disable_pci_dev - Disable a generic PCI device.
James Smart5b75da22008-12-04 22:39:35 -05006287 * @phba: pointer to lpfc hba data structure.
6288 *
James Smart3772a992009-05-22 14:50:54 -04006289 * This routine is invoked to perform the PCI device disablement that is
6290 * common to all lpfc PCI devices.
James Smart5b75da22008-12-04 22:39:35 -05006291 **/
6292static void
James Smart3772a992009-05-22 14:50:54 -04006293lpfc_disable_pci_dev(struct lpfc_hba *phba)
James Smart5b75da22008-12-04 22:39:35 -05006294{
James Smart3772a992009-05-22 14:50:54 -04006295 struct pci_dev *pdev;
James Smart5b75da22008-12-04 22:39:35 -05006296
James Smart3772a992009-05-22 14:50:54 -04006297 /* Obtain PCI device reference */
6298 if (!phba->pcidev)
6299 return;
6300 else
6301 pdev = phba->pcidev;
James Smart3772a992009-05-22 14:50:54 -04006302 /* Release PCI resource and disable PCI device */
Johannes Thumshirne0c04832016-06-07 09:44:03 +02006303 pci_release_mem_regions(pdev);
James Smart3772a992009-05-22 14:50:54 -04006304 pci_disable_device(pdev);
James Smart5b75da22008-12-04 22:39:35 -05006305
6306 return;
6307}
6308
6309/**
James Smart3772a992009-05-22 14:50:54 -04006310 * lpfc_reset_hba - Reset a hba
6311 * @phba: pointer to lpfc hba data structure.
James Smarte59058c2008-08-24 21:49:00 -04006312 *
James Smart3772a992009-05-22 14:50:54 -04006313 * This routine is invoked to reset a hba device. It brings the HBA
6314 * offline, performs a board restart, and then brings the board back
6315 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans up
6316 * outstanding mailbox commands.
James Smarte59058c2008-08-24 21:49:00 -04006317 **/
James Smart3772a992009-05-22 14:50:54 -04006318void
6319lpfc_reset_hba(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05006320{
James Smart3772a992009-05-22 14:50:54 -04006321 /* If resets are disabled then set error state and return. */
6322 if (!phba->cfg_enable_hba_reset) {
6323 phba->link_state = LPFC_HBA_ERROR;
6324 return;
6325 }
James Smart9ec58ec2021-01-04 10:02:35 -08006326
6327 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
6328 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
James Smartee620212014-04-04 13:51:53 -04006329 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
James Smart9ec58ec2021-01-04 10:02:35 -08006330 } else {
James Smartee620212014-04-04 13:51:53 -04006331 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
James Smart9ec58ec2021-01-04 10:02:35 -08006332 lpfc_sli_flush_io_rings(phba);
6333 }
James Smart3772a992009-05-22 14:50:54 -04006334 lpfc_offline(phba);
6335 lpfc_sli_brdrestart(phba);
6336 lpfc_online(phba);
6337 lpfc_unblock_mgmt_io(phba);
6338}
dea31012005-04-17 16:05:31 -05006339
James Smart3772a992009-05-22 14:50:54 -04006340/**
James Smart0a96e972011-07-22 18:37:28 -04006341 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
6342 * @phba: pointer to lpfc hba data structure.
6343 *
6344 * This function reads the SR-IOV extended capability of the physical
6345 * function and returns the total number of virtual functions (Total VFs)
6346 * that the device reports as supported. If the device does not expose the
6347 * SR-IOV capability, 0 is returned; as not all devices support SR-IOV,
6348 * a return value of 0 is not considered an error condition.
6349 **/
6350uint16_t
6351lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6352{
6353 struct pci_dev *pdev = phba->pcidev;
6354 uint16_t nr_virtfn;
6355 int pos;
6356
James Smart0a96e972011-07-22 18:37:28 -04006357 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6358 if (pos == 0)
6359 return 0;
6360
6361 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6362 return nr_virtfn;
6363}
6364
6365/**
James Smart912e3ac2011-05-24 11:42:11 -04006366 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
6367 * @phba: pointer to lpfc hba data structure.
6368 * @nr_vfn: number of virtual functions to be enabled.
6369 *
6370 * This function enables the PCI SR-IOV virtual functions to a physical
6371 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
6372 * enable that number of virtual functions on the physical function. As
6373 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
6374 * API call is not considered an error condition for most devices.
6375 **/
6376int
6377lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6378{
6379 struct pci_dev *pdev = phba->pcidev;
James Smart0a96e972011-07-22 18:37:28 -04006380 uint16_t max_nr_vfn;
James Smart912e3ac2011-05-24 11:42:11 -04006381 int rc;
6382
James Smart0a96e972011-07-22 18:37:28 -04006383 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6384 if (nr_vfn > max_nr_vfn) {
Dick Kennedy372c1872020-06-30 14:50:00 -07006385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0a96e972011-07-22 18:37:28 -04006386 "3057 Requested vfs (%d) greater than "
6387 "supported vfs (%d)", nr_vfn, max_nr_vfn);
6388 return -EINVAL;
6389 }
6390
James Smart912e3ac2011-05-24 11:42:11 -04006391 rc = pci_enable_sriov(pdev, nr_vfn);
6392 if (rc) {
6393 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6394 "2806 Failed to enable sriov on this device "
6395 "with vfn number nr_vf:%d, rc:%d\n",
6396 nr_vfn, rc);
6397 } else
6398 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6399 "2807 Successful enable sriov on this device "
6400 "with vfn number nr_vf:%d\n", nr_vfn);
6401 return rc;
6402}
6403
6404/**
James Smart895427b2017-02-12 13:52:30 -08006405 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
James Smart3772a992009-05-22 14:50:54 -04006406 * @phba: pointer to lpfc hba data structure.
6407 *
James Smart895427b2017-02-12 13:52:30 -08006408 * This routine is invoked to set up the driver internal resources before the
6409 * device specific resource setup to support the HBA device it is attached to.
James Smart3772a992009-05-22 14:50:54 -04006410 *
6411 * Return codes
James Smart895427b2017-02-12 13:52:30 -08006412 * 0 - successful
6413 * other values - error
James Smart3772a992009-05-22 14:50:54 -04006414 **/
6415static int
James Smart895427b2017-02-12 13:52:30 -08006416lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
James Smart3772a992009-05-22 14:50:54 -04006417{
James Smart895427b2017-02-12 13:52:30 -08006418 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05006419
James Smart3772a992009-05-22 14:50:54 -04006420 /*
James Smart895427b2017-02-12 13:52:30 -08006421 * Driver resources common to all SLI revisions
James Smart3772a992009-05-22 14:50:54 -04006422 */
James Smart895427b2017-02-12 13:52:30 -08006423 atomic_set(&phba->fast_event_count, 0);
Dick Kennedy372c1872020-06-30 14:50:00 -07006424 atomic_set(&phba->dbg_log_idx, 0);
6425 atomic_set(&phba->dbg_log_cnt, 0);
6426 atomic_set(&phba->dbg_log_dmping, 0);
James Smart895427b2017-02-12 13:52:30 -08006427 spin_lock_init(&phba->hbalock);
dea31012005-04-17 16:05:31 -05006428
James Smart523128e2018-09-10 10:30:46 -07006429 /* Initialize port_list spinlock */
6430 spin_lock_init(&phba->port_list_lock);
James Smart895427b2017-02-12 13:52:30 -08006431 INIT_LIST_HEAD(&phba->port_list);
James Smart523128e2018-09-10 10:30:46 -07006432
James Smart895427b2017-02-12 13:52:30 -08006433 INIT_LIST_HEAD(&phba->work_list);
6434 init_waitqueue_head(&phba->wait_4_mlo_m_q);
6435
6436 /* Initialize the wait queue head for the kernel thread */
6437 init_waitqueue_head(&phba->work_waitq);
6438
6439 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smartf358dd02017-02-12 13:52:34 -08006440 "1403 Protocols supported %s %s %s\n",
James Smart895427b2017-02-12 13:52:30 -08006441 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
6442 "SCSI" : " "),
6443 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
James Smartf358dd02017-02-12 13:52:34 -08006444 "NVME" : " "),
6445 (phba->nvmet_support ? "NVMET" : " "));
James Smart895427b2017-02-12 13:52:30 -08006446
James Smart0794d602019-01-28 11:14:19 -08006447 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
6448 spin_lock_init(&phba->scsi_buf_list_get_lock);
6449 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6450 spin_lock_init(&phba->scsi_buf_list_put_lock);
6451 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
James Smart895427b2017-02-12 13:52:30 -08006452
6453 /* Initialize the fabric iocb list */
6454 INIT_LIST_HEAD(&phba->fabric_iocb_list);
6455
6456 /* Initialize list to save ELS buffers */
6457 INIT_LIST_HEAD(&phba->elsbuf);
6458
6459 /* Initialize FCF connection rec list */
6460 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
6461
6462 /* Initialize OAS configuration list */
6463 spin_lock_init(&phba->devicelock);
6464 INIT_LIST_HEAD(&phba->luns);
6465
James Smart3772a992009-05-22 14:50:54 -04006466 /* MBOX heartbeat timer */
Kees Cookf22eb4d2017-09-06 20:24:26 -07006467 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
James Smart3772a992009-05-22 14:50:54 -04006468 /* Fabric block timer */
Kees Cookf22eb4d2017-09-06 20:24:26 -07006469 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
James Smart3772a992009-05-22 14:50:54 -04006470 /* EA polling mode timer */
Kees Cookf22eb4d2017-09-06 20:24:26 -07006471 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
James Smart895427b2017-02-12 13:52:30 -08006472 /* Heartbeat timer */
Kees Cookf22eb4d2017-09-06 20:24:26 -07006473 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
James Smart895427b2017-02-12 13:52:30 -08006474
James Smart32517fc2019-01-28 11:14:33 -08006475 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
6476
Dick Kennedy317aeb82020-06-30 14:49:59 -07006477 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
6478 lpfc_idle_stat_delay_work);
6479
James Smart895427b2017-02-12 13:52:30 -08006480 return 0;
6481}
6482
6483/**
6484 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
6485 * @phba: pointer to lpfc hba data structure.
6486 *
6487 * This routine is invoked to set up the driver internal resources specific to
6488 * support the SLI-3 HBA device it is attached to.
6489 *
6490 * Return codes
6491 * 0 - successful
6492 * other values - error
6493 **/
6494static int
6495lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6496{
James Smart0794d602019-01-28 11:14:19 -08006497 int rc, entry_sz;
James Smart895427b2017-02-12 13:52:30 -08006498
6499 /*
6500 * Initialize timers used by driver
6501 */
6502
6503 /* FCP polling mode timer */
Kees Cookf22eb4d2017-09-06 20:24:26 -07006504 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
James Smart3772a992009-05-22 14:50:54 -04006505
6506 /* Host attention work mask setup */
6507 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
6508 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
6509
6510 /* Get all the module params for configuring this host */
6511 lpfc_get_cfgparam(phba);
James Smart895427b2017-02-12 13:52:30 -08006512 /* Set up phase-1 common device driver resources */
6513
6514 rc = lpfc_setup_driver_resource_phase1(phba);
6515 if (rc)
6516 return -ENODEV;
6517
James Smart49198b32010-04-06 15:04:33 -04006518 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
6519 phba->menlo_flag |= HBA_MENLO_SUPPORT;
6520 /* check for menlo minimum sg count */
6521 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
6522 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
6523 }
6524
James Smart895427b2017-02-12 13:52:30 -08006525 if (!phba->sli.sli3_ring)
Kees Cook6396bb22018-06-12 14:03:40 -07006526 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
6527 sizeof(struct lpfc_sli_ring),
6528 GFP_KERNEL);
James Smart895427b2017-02-12 13:52:30 -08006529 if (!phba->sli.sli3_ring)
James Smart2a76a282012-08-03 12:35:54 -04006530 return -ENOMEM;
6531
James Smart3772a992009-05-22 14:50:54 -04006532 /*
James Smart96f70772013-04-17 20:16:15 -04006533	 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
James Smart3772a992009-05-22 14:50:54 -04006534 * used to create the sg_dma_buf_pool must be dynamically calculated.
James Smart3772a992009-05-22 14:50:54 -04006535 */
James Smart3772a992009-05-22 14:50:54 -04006536
James Smart0794d602019-01-28 11:14:19 -08006537 if (phba->sli_rev == LPFC_SLI_REV4)
6538 entry_sz = sizeof(struct sli4_sge);
6539 else
6540 entry_sz = sizeof(struct ulp_bde64);
6541
James Smart96f70772013-04-17 20:16:15 -04006542 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
6543 if (phba->cfg_enable_bg) {
6544 /*
6545 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
6546		 * the FCP rsp, and a BDE for each. Since we have no control
6547		 * over how many protection data segments the SCSI Layer
6548		 * will hand us (i.e. there could be one for every block
6549		 * in the IO), we just allocate enough BDEs to accommodate
6550 * our max amount and we need to limit lpfc_sg_seg_cnt to
6551 * minimize the risk of running out.
6552 */
6553 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6554 sizeof(struct fcp_rsp) +
James Smart0794d602019-01-28 11:14:19 -08006555 (LPFC_MAX_SG_SEG_CNT * entry_sz);
James Smart96f70772013-04-17 20:16:15 -04006556
6557 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
6558 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
6559
6560 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
6561 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
6562 } else {
6563 /*
6564 * The scsi_buf for a regular I/O will hold the FCP cmnd,
6565 * the FCP rsp, a BDE for each, and a BDE for up to
6566 * cfg_sg_seg_cnt data segments.
6567 */
6568 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6569 sizeof(struct fcp_rsp) +
James Smart0794d602019-01-28 11:14:19 -08006570 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
James Smart96f70772013-04-17 20:16:15 -04006571
6572 /* Total BDEs in BPL for scsi_sg_list */
6573 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
6574 }
6575
6576 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
James Smartc90b4482020-03-22 11:12:56 -07006577 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
James Smart96f70772013-04-17 20:16:15 -04006578 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6579 phba->cfg_total_seg_cnt);
6580
James Smart3772a992009-05-22 14:50:54 -04006581 phba->max_vpi = LPFC_MAX_VPI;
6582 /* This will be set to correct value after config_port mbox */
6583 phba->max_vports = 0;
6584
6585 /*
6586 * Initialize the SLI Layer to run with lpfc HBAs.
6587 */
6588 lpfc_sli_setup(phba);
James Smart895427b2017-02-12 13:52:30 -08006589 lpfc_sli_queue_init(phba);
James Smart3772a992009-05-22 14:50:54 -04006590
6591 /* Allocate device driver memory */
6592 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
6593 return -ENOMEM;
6594
James Smartd79c9e92019-08-14 16:57:09 -07006595 phba->lpfc_sg_dma_buf_pool =
6596 dma_pool_create("lpfc_sg_dma_buf_pool",
6597 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
6598 BPL_ALIGN_SZ, 0);
6599
6600 if (!phba->lpfc_sg_dma_buf_pool)
6601 goto fail_free_mem;
6602
6603 phba->lpfc_cmd_rsp_buf_pool =
6604 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6605 &phba->pcidev->dev,
6606 sizeof(struct fcp_cmnd) +
6607 sizeof(struct fcp_rsp),
6608 BPL_ALIGN_SZ, 0);
6609
6610 if (!phba->lpfc_cmd_rsp_buf_pool)
6611 goto fail_free_dma_buf_pool;
6612
James Smart912e3ac2011-05-24 11:42:11 -04006613 /*
6614 * Enable sr-iov virtual functions if supported and configured
6615 * through the module parameter.
6616 */
6617 if (phba->cfg_sriov_nr_virtfn > 0) {
6618 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6619 phba->cfg_sriov_nr_virtfn);
6620 if (rc) {
6621 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6622 "2808 Requested number of SR-IOV "
6623 "virtual functions (%d) is not "
6624 "supported\n",
6625 phba->cfg_sriov_nr_virtfn);
6626 phba->cfg_sriov_nr_virtfn = 0;
6627 }
6628 }
6629
James Smart3772a992009-05-22 14:50:54 -04006630 return 0;
James Smartd79c9e92019-08-14 16:57:09 -07006631
6632fail_free_dma_buf_pool:
6633 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6634 phba->lpfc_sg_dma_buf_pool = NULL;
6635fail_free_mem:
6636 lpfc_mem_free(phba);
6637 return -ENOMEM;
James Smart3772a992009-05-22 14:50:54 -04006638}
6639
6640/**
6641 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
6642 * @phba: pointer to lpfc hba data structure.
6643 *
6644 * This routine is invoked to unset the driver internal resources set up
6645 * specific for supporting the SLI-3 HBA device it is attached to.
6646 **/
6647static void
6648lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6649{
6650 /* Free device driver memory allocated */
6651 lpfc_mem_free_all(phba);
6652
6653 return;
6654}
6655
6656/**
James Smartda0436e2009-05-22 14:51:39 -04006657 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
6658 * @phba: pointer to lpfc hba data structure.
6659 *
6660 * This routine is invoked to set up the driver internal resources specific to
6661 * support the SLI-4 HBA device it is attached to.
6662 *
6663 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02006664 * 0 - successful
James Smartda0436e2009-05-22 14:51:39 -04006665 * other values - error
6666 **/
6667static int
6668lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6669{
James Smart28baac72010-02-12 14:42:03 -05006670 LPFC_MBOXQ_t *mboxq;
James Smartf358dd02017-02-12 13:52:34 -08006671 MAILBOX_t *mb;
James Smart895427b2017-02-12 13:52:30 -08006672 int rc, i, max_buf_size;
James Smart09294d42013-04-17 20:16:05 -04006673 int longs;
James Smart81e6a632017-11-20 16:00:43 -08006674 int extra;
James Smartf358dd02017-02-12 13:52:34 -08006675 uint64_t wwn;
James Smartb92dc722018-05-24 21:09:01 -07006676 u32 if_type;
6677 u32 if_fam;
James Smartda0436e2009-05-22 14:51:39 -04006678
James Smart895427b2017-02-12 13:52:30 -08006679 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
James Smarteede4972019-11-21 09:55:56 -08006680 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
James Smart895427b2017-02-12 13:52:30 -08006681 phba->sli4_hba.curr_disp_cpu = 0;
6682
James Smart716d3bc2013-09-06 12:18:28 -04006683 /* Get all the module params for configuring this host */
6684 lpfc_get_cfgparam(phba);
6685
James Smart895427b2017-02-12 13:52:30 -08006686 /* Set up phase-1 common device driver resources */
6687 rc = lpfc_setup_driver_resource_phase1(phba);
6688 if (rc)
6689 return -ENODEV;
6690
James Smartda0436e2009-05-22 14:51:39 -04006691 /* Before proceed, wait for POST done and device ready */
6692 rc = lpfc_sli4_post_status_check(phba);
6693 if (rc)
6694 return -ENODEV;
6695
James Smart3cee98d2019-08-14 16:56:34 -07006696 /* Allocate all driver workqueues here */
6697
6698 /* The lpfc_wq workqueue for deferred irq use */
6699 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
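	/* Allocation failure is not treated as fatal at this point; the
	 * teardown path (lpfc_unset_driver_resource_phase2) and other
	 * users are expected to test phba->wq for NULL before use.
	 */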
6700
James Smartda0436e2009-05-22 14:51:39 -04006701 /*
6702 * Initialize timers used by driver
6703 */
6704
Kees Cookf22eb4d2017-09-06 20:24:26 -07006705 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
James Smartda0436e2009-05-22 14:51:39 -04006706
James Smartecfd03c2010-02-12 14:41:27 -05006707 /* FCF rediscover timer */
Kees Cookf22eb4d2017-09-06 20:24:26 -07006708 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
James Smartecfd03c2010-02-12 14:41:27 -05006709
James Smartda0436e2009-05-22 14:51:39 -04006710 /*
James Smart7ad20aa2011-05-24 11:44:28 -04006711 * Control structure for handling external multi-buffer mailbox
6712 * command pass-through.
6713 */
6714 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6715 sizeof(struct lpfc_mbox_ext_buf_ctx));
6716 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6717
James Smartda0436e2009-05-22 14:51:39 -04006718 phba->max_vpi = LPFC_MAX_VPI;
James Smart67d12732012-08-03 12:36:13 -04006719
James Smartda0436e2009-05-22 14:51:39 -04006720 /* This will be set to correct value after the read_config mbox */
6721 phba->max_vports = 0;
6722
6723 /* Program the default value of vlan_id and fc_map */
6724 phba->valid_vlan = 0;
6725 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6726 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6727 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6728
6729 /*
James Smart2a76a282012-08-03 12:35:54 -04006730 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
James Smart895427b2017-02-12 13:52:30 -08006731 * we will associate a new ring, for each EQ/CQ/WQ tuple.
6732 * The WQ create will allocate the ring.
James Smart2a76a282012-08-03 12:35:54 -04006733 */
James Smart085c6472010-11-20 23:11:37 -05006734
James Smartda0436e2009-05-22 14:51:39 -04006735 /* Initialize buffer queue management fields */
James Smart895427b2017-02-12 13:52:30 -08006736 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
James Smartda0436e2009-05-22 14:51:39 -04006737 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6738 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6739
Gaurav Srivastava20397172021-06-08 10:05:54 +05306740 /* for VMID idle timeout if VMID is enabled */
6741 if (lpfc_is_vmid_enabled(phba))
6742 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
6743
James Smartda0436e2009-05-22 14:51:39 -04006744 /*
6745 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6746 */
James Smartc00f62e2019-08-14 16:57:11 -07006747 /* Initialize the Abort buffer list used by driver */
6748 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6749 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
James Smart895427b2017-02-12 13:52:30 -08006750
6751 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6752 /* Initialize the Abort nvme buffer list used by driver */
James Smart5e5b5112019-01-28 11:14:22 -08006753 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
James Smart86c67372017-04-21 16:05:04 -07006754 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
James Smarta8cf5df2017-05-15 15:20:46 -07006755 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
James Smart79d8c4c2019-05-21 17:48:56 -07006756 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6757 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
James Smart895427b2017-02-12 13:52:30 -08006758 }
6759
James Smartda0436e2009-05-22 14:51:39 -04006760 /* This abort list used by worker thread */
James Smart895427b2017-02-12 13:52:30 -08006761 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
James Smarta8cf5df2017-05-15 15:20:46 -07006762 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
James Smarte7dab162020-10-20 13:27:12 -07006763 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
6764 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
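	/* asynce_list_lock protects sp_asynce_work_queue; it is taken by
	 * lpfc_sli4_async_event_proc() above when draining that list.
	 */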
James Smartda0436e2009-05-22 14:51:39 -04006765
6766 /*
James Smart6d368e52011-05-24 11:44:12 -04006767 * Initialize driver internal slow-path work queues
James Smartda0436e2009-05-22 14:51:39 -04006768 */
6769
6770	/* Driver internal slow-path CQ Event pool */
6771 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6772 /* Response IOCB work queue list */
James Smart45ed1192009-10-02 15:17:02 -04006773 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
James Smartda0436e2009-05-22 14:51:39 -04006774 /* Asynchronous event CQ Event work queue list */
6775 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
James Smartda0436e2009-05-22 14:51:39 -04006776 /* Slow-path XRI aborted CQ Event work queue list */
6777 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6778 /* Receive queue CQ Event work queue list */
6779 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6780
James Smart6d368e52011-05-24 11:44:12 -04006781 /* Initialize extent block lists. */
6782 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6783 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6784 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6785 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6786
James Smartd1f525a2017-04-21 16:04:55 -07006787	/* Initialize mboxq lists. Even if the early init routines fail,
6788	 * these lists still need to be correctly initialized for cleanup.
6789 */
6790 INIT_LIST_HEAD(&phba->sli.mboxq);
6791 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6792
James Smart448193b2015-12-16 18:12:05 -05006793 /* initialize optic_state to 0xFF */
6794 phba->sli4_hba.lnk_info.optic_state = 0xff;
6795
James Smartda0436e2009-05-22 14:51:39 -04006796 /* Allocate device driver memory */
6797 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6798 if (rc)
6799 return -ENOMEM;
6800
James Smart2fcee4b2010-12-15 17:57:46 -05006801 /* IF Type 2 ports get initialized now. */
James Smart27d6ac02018-02-22 08:18:42 -08006802 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
James Smart2fcee4b2010-12-15 17:57:46 -05006803 LPFC_SLI_INTF_IF_TYPE_2) {
6804 rc = lpfc_pci_function_reset(phba);
James Smart895427b2017-02-12 13:52:30 -08006805 if (unlikely(rc)) {
6806 rc = -ENODEV;
6807 goto out_free_mem;
6808 }
James Smart946727d2015-04-07 15:07:09 -04006809 phba->temp_sensor_support = 1;
James Smart2fcee4b2010-12-15 17:57:46 -05006810 }
6811
James Smartda0436e2009-05-22 14:51:39 -04006812 /* Create the bootstrap mailbox command */
6813 rc = lpfc_create_bootstrap_mbox(phba);
6814 if (unlikely(rc))
6815 goto out_free_mem;
6816
6817 /* Set up the host's endian order with the device. */
6818 rc = lpfc_setup_endian_order(phba);
6819 if (unlikely(rc))
6820 goto out_free_bsmbx;
6821
6822 /* Set up the hba's configuration parameters. */
6823 rc = lpfc_sli4_read_config(phba);
6824 if (unlikely(rc))
6825 goto out_free_bsmbx;
James Smartcff261f2013-12-17 20:29:47 -05006826 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6827 if (unlikely(rc))
6828 goto out_free_bsmbx;
James Smartda0436e2009-05-22 14:51:39 -04006829
James Smart2fcee4b2010-12-15 17:57:46 -05006830 /* IF Type 0 ports get initialized now. */
6831 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6832 LPFC_SLI_INTF_IF_TYPE_0) {
6833 rc = lpfc_pci_function_reset(phba);
6834 if (unlikely(rc))
6835 goto out_free_bsmbx;
6836 }
James Smartda0436e2009-05-22 14:51:39 -04006837
James Smartcb5172e2010-03-15 11:25:07 -04006838 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6839 GFP_KERNEL);
6840 if (!mboxq) {
6841 rc = -ENOMEM;
6842 goto out_free_bsmbx;
6843 }
6844
James Smartf358dd02017-02-12 13:52:34 -08006845 /* Check for NVMET being configured */
James Smart895427b2017-02-12 13:52:30 -08006846 phba->nvmet_support = 0;
James Smartf358dd02017-02-12 13:52:34 -08006847 if (lpfc_enable_nvmet_cnt) {
6848
6849 /* First get WWN of HBA instance */
6850 lpfc_read_nv(phba, mboxq);
6851 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6852 if (rc != MBX_SUCCESS) {
Dick Kennedy372c1872020-06-30 14:50:00 -07006853 lpfc_printf_log(phba, KERN_ERR,
6854 LOG_TRACE_EVENT,
James Smartf358dd02017-02-12 13:52:34 -08006855 "6016 Mailbox failed , mbxCmd x%x "
6856 "READ_NV, mbxStatus x%x\n",
6857 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6858 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
James Smartd1f525a2017-04-21 16:04:55 -07006859 mempool_free(mboxq, phba->mbox_mem_pool);
James Smartf358dd02017-02-12 13:52:34 -08006860 rc = -EIO;
6861 goto out_free_bsmbx;
6862 }
6863 mb = &mboxq->u.mb;
6864 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6865 sizeof(uint64_t));
6866 wwn = cpu_to_be64(wwn);
6867 phba->sli4_hba.wwnn.u.name = wwn;
6868 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6869 sizeof(uint64_t));
6870 /* wwn is WWPN of HBA instance */
6871 wwn = cpu_to_be64(wwn);
6872 phba->sli4_hba.wwpn.u.name = wwn;
6873
6874 /* Check to see if it matches any module parameter */
6875 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6876 if (wwn == lpfc_enable_nvmet[i]) {
James Smart7d708032017-03-08 14:36:01 -08006877#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
James Smart3c603be2017-05-15 15:20:44 -07006878 if (lpfc_nvmet_mem_alloc(phba))
6879 break;
6880
6881 phba->nvmet_support = 1; /* a match */
6882
Dick Kennedy372c1872020-06-30 14:50:00 -07006883 lpfc_printf_log(phba, KERN_ERR,
6884 LOG_TRACE_EVENT,
James Smartf358dd02017-02-12 13:52:34 -08006885 "6017 NVME Target %016llx\n",
6886 wwn);
James Smart7d708032017-03-08 14:36:01 -08006887#else
Dick Kennedy372c1872020-06-30 14:50:00 -07006888 lpfc_printf_log(phba, KERN_ERR,
6889 LOG_TRACE_EVENT,
James Smart7d708032017-03-08 14:36:01 -08006890 "6021 Can't enable NVME Target."
6891 " NVME_TARGET_FC infrastructure"
6892 " is not in kernel\n");
6893#endif
James Smartc4908502019-01-28 11:14:28 -08006894 /* Not supported for NVMET */
6895 phba->cfg_xri_rebalancing = 0;
Dick Kennedy3048e3e2020-05-01 14:43:06 -07006896 if (phba->irq_chann_mode == NHT_MODE) {
6897 phba->cfg_irq_chann =
6898 phba->sli4_hba.num_present_cpu;
6899 phba->cfg_hdw_queue =
6900 phba->sli4_hba.num_present_cpu;
6901 phba->irq_chann_mode = NORMAL_MODE;
6902 }
James Smart3c603be2017-05-15 15:20:44 -07006903 break;
James Smartf358dd02017-02-12 13:52:34 -08006904 }
6905 }
6906 }
James Smart895427b2017-02-12 13:52:30 -08006907
6908 lpfc_nvme_mod_param_dep(phba);
6909
James Smartfedd3b72011-02-16 12:39:24 -05006910 /*
6911 * Get sli4 parameters that override parameters from Port capabilities.
James Smart6d368e52011-05-24 11:44:12 -04006912 * If this call fails, it isn't critical unless the SLI4 parameters come
6913 * back in conflict.
James Smartfedd3b72011-02-16 12:39:24 -05006914 */
James Smart6d368e52011-05-24 11:44:12 -04006915 rc = lpfc_get_sli4_parameters(phba, mboxq);
6916 if (rc) {
James Smartb92dc722018-05-24 21:09:01 -07006917 if_type = bf_get(lpfc_sli_intf_if_type,
6918 &phba->sli4_hba.sli_intf);
6919 if_fam = bf_get(lpfc_sli_intf_sli_family,
6920 &phba->sli4_hba.sli_intf);
James Smart6d368e52011-05-24 11:44:12 -04006921 if (phba->sli4_hba.extents_in_use &&
6922 phba->sli4_hba.rpi_hdrs_in_use) {
Dick Kennedy372c1872020-06-30 14:50:00 -07006923 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6924 "2999 Unsupported SLI4 Parameters "
6925 "Extents and RPI headers enabled.\n");
James Smartb92dc722018-05-24 21:09:01 -07006926 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6927 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
6928 mempool_free(mboxq, phba->mbox_mem_pool);
6929 rc = -EIO;
6930 goto out_free_bsmbx;
6931 }
James Smart6d368e52011-05-24 11:44:12 -04006932 }
James Smartb92dc722018-05-24 21:09:01 -07006933 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6934 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6935 mempool_free(mboxq, phba->mbox_mem_pool);
6936 rc = -EIO;
6937 goto out_free_bsmbx;
6938 }
James Smart6d368e52011-05-24 11:44:12 -04006939 }
James Smart895427b2017-02-12 13:52:30 -08006940
James Smartd79c9e92019-08-14 16:57:09 -07006941 /*
6942 * 1 for cmd, 1 for rsp, NVME adds an extra one
6943 * for boundary conditions in its max_sgl_segment template.
6944 */
6945 extra = 2;
6946 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6947 extra++;
6948
6949 /*
6950 * It doesn't matter what family our adapter is in, we are
6951 * limited to 2 Pages, 512 SGEs, for our SGL.
6952 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6953 */
6954 max_buf_size = (2 * SLI4_PAGE_SIZE);
6955
6956 /*
6957	 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
6958 * used to create the sg_dma_buf_pool must be calculated.
6959 */
6960 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6961 /* Both cfg_enable_bg and cfg_external_dif code paths */
6962
6963 /*
6964 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6965		 * the FCP rsp, and a SGE. Since we have no control
6966		 * over how many protection segments the SCSI Layer
6967		 * will hand us (i.e. there could be one for every block
6968		 * in the IO), just allocate enough SGEs to accommodate
6969 * our max amount and we need to limit lpfc_sg_seg_cnt
6970 * to minimize the risk of running out.
6971 */
6972 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6973 sizeof(struct fcp_rsp) + max_buf_size;
6974
6975 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6976 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6977
6978 /*
6979 * If supporting DIF, reduce the seg count for scsi to
6980 * allow room for the DIF sges.
6981 */
6982 if (phba->cfg_enable_bg &&
6983 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6984 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6985 else
6986 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6987
6988 } else {
6989 /*
6990 * The scsi_buf for a regular I/O holds the FCP cmnd,
6991 * the FCP rsp, a SGE for each, and a SGE for up to
6992 * cfg_sg_seg_cnt data segments.
6993 */
6994 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6995 sizeof(struct fcp_rsp) +
6996 ((phba->cfg_sg_seg_cnt + extra) *
6997 sizeof(struct sli4_sge));
6998
6999 /* Total SGEs for scsi_sg_list */
7000 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
7001 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
7002
7003 /*
7004 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
7005 * need to post 1 page for the SGL.
7006 */
7007 }
7008
7009 if (phba->cfg_xpsgl && !phba->nvmet_support)
7010 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
7011 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
7012 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
7013 else
7014 phba->cfg_sg_dma_buf_size =
7015 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
7016
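	/* Number of SGEs that fit in one sg DMA buffer of the size chosen
	 * above; multi-buffer (XPSGL) SGL handling is assumed to use this
	 * when an SGL spans buffer boundaries.
	 */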
7017 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
7018 sizeof(struct sli4_sge);
7019
7020 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
7021 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7022 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
7023 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
7024 "6300 Reducing NVME sg segment "
7025 "cnt to %d\n",
7026 LPFC_MAX_NVME_SEG_CNT);
7027 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
7028 } else
7029 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
7030 }
7031
James Smartd79c9e92019-08-14 16:57:09 -07007032 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7033 "9087 sg_seg_cnt:%d dmabuf_size:%d "
7034 "total:%d scsi:%d nvme:%d\n",
7035 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7036 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
7037 phba->cfg_nvme_seg_cnt);
7038
7039 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
7040 i = phba->cfg_sg_dma_buf_size;
7041 else
7042 i = SLI4_PAGE_SIZE;
7043
7044 phba->lpfc_sg_dma_buf_pool =
7045 dma_pool_create("lpfc_sg_dma_buf_pool",
7046 &phba->pcidev->dev,
7047 phba->cfg_sg_dma_buf_size,
7048 i, 0);
7049 if (!phba->lpfc_sg_dma_buf_pool)
7050 goto out_free_bsmbx;
7051
7052 phba->lpfc_cmd_rsp_buf_pool =
7053 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7054 &phba->pcidev->dev,
7055 sizeof(struct fcp_cmnd) +
7056 sizeof(struct fcp_rsp),
7057 i, 0);
7058 if (!phba->lpfc_cmd_rsp_buf_pool)
7059 goto out_free_sg_dma_buf;
7060
James Smartcb5172e2010-03-15 11:25:07 -04007061 mempool_free(mboxq, phba->mbox_mem_pool);
James Smart1ba981f2014-02-20 09:56:45 -05007062
7063 /* Verify OAS is supported */
7064 lpfc_sli4_oas_verify(phba);
James Smart1ba981f2014-02-20 09:56:45 -05007065
James Smartd2cc9bc2018-09-10 10:30:50 -07007066 /* Verify RAS support on adapter */
7067 lpfc_sli4_ras_init(phba);
7068
James Smart5350d872011-10-10 21:33:49 -04007069 /* Verify all the SLI4 queues */
7070 rc = lpfc_sli4_queue_verify(phba);
James Smartda0436e2009-05-22 14:51:39 -04007071 if (rc)
James Smartd79c9e92019-08-14 16:57:09 -07007072 goto out_free_cmd_rsp_buf;
James Smartda0436e2009-05-22 14:51:39 -04007073
7074 /* Create driver internal CQE event pool */
7075 rc = lpfc_sli4_cq_event_pool_create(phba);
7076 if (rc)
James Smartd79c9e92019-08-14 16:57:09 -07007077 goto out_free_cmd_rsp_buf;
James Smartda0436e2009-05-22 14:51:39 -04007078
James Smart8a9d2e82012-05-09 21:16:12 -04007079 /* Initialize sgl lists per host */
7080 lpfc_init_sgl_list(phba);
7081
7082 /* Allocate and initialize active sgl array */
James Smartda0436e2009-05-22 14:51:39 -04007083 rc = lpfc_init_active_sgl_array(phba);
7084 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04007086 "1430 Failed to initialize sgl list.\n");
James Smart8a9d2e82012-05-09 21:16:12 -04007087 goto out_destroy_cq_event_pool;
James Smartda0436e2009-05-22 14:51:39 -04007088 }
James Smartda0436e2009-05-22 14:51:39 -04007089 rc = lpfc_sli4_init_rpi_hdrs(phba);
7090 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04007092 "1432 Failed to initialize rpi headers.\n");
7093 goto out_free_active_sgl;
7094 }
7095
James Smarta93ff372010-10-22 11:06:08 -04007096 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
James Smart0c9ab6f2010-02-26 14:15:57 -05007097 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
Kees Cook6396bb22018-06-12 14:03:40 -07007098 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
James Smart0c9ab6f2010-02-26 14:15:57 -05007099 GFP_KERNEL);
7100 if (!phba->fcf.fcf_rr_bmask) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007101 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0c9ab6f2010-02-26 14:15:57 -05007102 "2759 Failed allocate memory for FCF round "
7103 "robin failover bmask\n");
James Smart05580562011-05-24 11:40:48 -04007104 rc = -ENOMEM;
James Smart0c9ab6f2010-02-26 14:15:57 -05007105 goto out_remove_rpi_hdrs;
7106 }
7107
James Smart6a828b02019-01-28 11:14:31 -08007108 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
James Smartcdb42be2019-01-28 11:14:21 -08007109 sizeof(struct lpfc_hba_eq_hdl),
7110 GFP_KERNEL);
James Smart895427b2017-02-12 13:52:30 -08007111 if (!phba->sli4_hba.hba_eq_hdl) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007112 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart67d12732012-08-03 12:36:13 -04007113 "2572 Failed allocate memory for "
7114 "fast-path per-EQ handle array\n");
7115 rc = -ENOMEM;
7116 goto out_free_fcf_rr_bmask;
James Smartda0436e2009-05-22 14:51:39 -04007117 }
7118
James Smart222e9232019-01-28 11:14:35 -08007119 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
James Smart895427b2017-02-12 13:52:30 -08007120 sizeof(struct lpfc_vector_map_info),
7121 GFP_KERNEL);
James Smart7bb03bb2013-04-17 20:19:16 -04007122 if (!phba->sli4_hba.cpu_map) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart7bb03bb2013-04-17 20:19:16 -04007124 "3327 Failed allocate memory for msi-x "
7125 "interrupt vector mapping\n");
7126 rc = -ENOMEM;
James Smart895427b2017-02-12 13:52:30 -08007127 goto out_free_hba_eq_hdl;
James Smart7bb03bb2013-04-17 20:19:16 -04007128 }
James Smartb246de12013-05-31 17:03:07 -04007129
James Smart32517fc2019-01-28 11:14:33 -08007130 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
7131 if (!phba->sli4_hba.eq_info) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart32517fc2019-01-28 11:14:33 -08007133 "3321 Failed allocation for per_cpu stats\n");
7134 rc = -ENOMEM;
7135 goto out_free_hba_cpu_map;
7136 }
James Smart840eda92020-03-22 11:13:00 -07007137
Dick Kennedy317aeb82020-06-30 14:49:59 -07007138 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
7139 sizeof(*phba->sli4_hba.idle_stat),
7140 GFP_KERNEL);
7141 if (!phba->sli4_hba.idle_stat) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007142 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Dick Kennedy317aeb82020-06-30 14:49:59 -07007143 "3390 Failed allocation for idle_stat\n");
7144 rc = -ENOMEM;
7145 goto out_free_hba_eq_info;
7146 }
7147
James Smart840eda92020-03-22 11:13:00 -07007148#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7149 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
7150 if (!phba->sli4_hba.c_stat) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart840eda92020-03-22 11:13:00 -07007152 "3332 Failed allocating per cpu hdwq stats\n");
7153 rc = -ENOMEM;
Dick Kennedy317aeb82020-06-30 14:49:59 -07007154 goto out_free_hba_idle_stat;
James Smart840eda92020-03-22 11:13:00 -07007155 }
7156#endif
7157
James Smart912e3ac2011-05-24 11:42:11 -04007158 /*
7159 * Enable sr-iov virtual functions if supported and configured
7160 * through the module parameter.
7161 */
7162 if (phba->cfg_sriov_nr_virtfn > 0) {
7163 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7164 phba->cfg_sriov_nr_virtfn);
7165 if (rc) {
7166 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7167 "3020 Requested number of SR-IOV "
7168 "virtual functions (%d) is not "
7169 "supported\n",
7170 phba->cfg_sriov_nr_virtfn);
7171 phba->cfg_sriov_nr_virtfn = 0;
7172 }
7173 }
7174
James Smart5248a742011-07-22 18:37:06 -04007175 return 0;
James Smartda0436e2009-05-22 14:51:39 -04007176
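/*
 * Error unwind: resources are released in the reverse order of their
 * allocation above; each label frees only what was successfully set up
 * before the corresponding failure point.
 */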
James Smart840eda92020-03-22 11:13:00 -07007177#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
Dick Kennedy317aeb82020-06-30 14:49:59 -07007178out_free_hba_idle_stat:
7179 kfree(phba->sli4_hba.idle_stat);
7180#endif
James Smart840eda92020-03-22 11:13:00 -07007181out_free_hba_eq_info:
7182 free_percpu(phba->sli4_hba.eq_info);
James Smart32517fc2019-01-28 11:14:33 -08007183out_free_hba_cpu_map:
7184 kfree(phba->sli4_hba.cpu_map);
James Smart895427b2017-02-12 13:52:30 -08007185out_free_hba_eq_hdl:
7186 kfree(phba->sli4_hba.hba_eq_hdl);
James Smart0c9ab6f2010-02-26 14:15:57 -05007187out_free_fcf_rr_bmask:
7188 kfree(phba->fcf.fcf_rr_bmask);
James Smartda0436e2009-05-22 14:51:39 -04007189out_remove_rpi_hdrs:
7190 lpfc_sli4_remove_rpi_hdrs(phba);
7191out_free_active_sgl:
7192 lpfc_free_active_sgl(phba);
James Smartda0436e2009-05-22 14:51:39 -04007193out_destroy_cq_event_pool:
7194 lpfc_sli4_cq_event_pool_destroy(phba);
James Smartd79c9e92019-08-14 16:57:09 -07007195out_free_cmd_rsp_buf:
7196 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
7197 phba->lpfc_cmd_rsp_buf_pool = NULL;
7198out_free_sg_dma_buf:
7199 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7200 phba->lpfc_sg_dma_buf_pool = NULL;
James Smartda0436e2009-05-22 14:51:39 -04007201out_free_bsmbx:
7202 lpfc_destroy_bootstrap_mbox(phba);
7203out_free_mem:
7204 lpfc_mem_free(phba);
7205 return rc;
7206}
7207
7208/**
7209 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
7210 * @phba: pointer to lpfc hba data structure.
7211 *
7212 * This routine is invoked to unset the driver internal resources set up
7213 * specific for supporting the SLI-4 HBA device it is attached to.
7214 **/
7215static void
7216lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
7217{
7218 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7219
James Smart32517fc2019-01-28 11:14:33 -08007220 free_percpu(phba->sli4_hba.eq_info);
James Smart840eda92020-03-22 11:13:00 -07007221#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7222 free_percpu(phba->sli4_hba.c_stat);
7223#endif
Dick Kennedy317aeb82020-06-30 14:49:59 -07007224 kfree(phba->sli4_hba.idle_stat);
James Smart32517fc2019-01-28 11:14:33 -08007225
James Smart7bb03bb2013-04-17 20:19:16 -04007226 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
7227 kfree(phba->sli4_hba.cpu_map);
James Smart222e9232019-01-28 11:14:35 -08007228 phba->sli4_hba.num_possible_cpu = 0;
James Smart7bb03bb2013-04-17 20:19:16 -04007229 phba->sli4_hba.num_present_cpu = 0;
James Smart76fd07a2014-02-20 09:57:18 -05007230 phba->sli4_hba.curr_disp_cpu = 0;
Dick Kennedy3048e3e2020-05-01 14:43:06 -07007231 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
James Smart7bb03bb2013-04-17 20:19:16 -04007232
James Smartda0436e2009-05-22 14:51:39 -04007233 /* Free memory allocated for fast-path work queue handles */
James Smart895427b2017-02-12 13:52:30 -08007234 kfree(phba->sli4_hba.hba_eq_hdl);
James Smartda0436e2009-05-22 14:51:39 -04007235
7236 /* Free the allocated rpi headers. */
7237 lpfc_sli4_remove_rpi_hdrs(phba);
James Smartd11e31d2009-06-10 17:23:06 -04007238 lpfc_sli4_remove_rpis(phba);
James Smartda0436e2009-05-22 14:51:39 -04007239
James Smart0c9ab6f2010-02-26 14:15:57 -05007240 /* Free eligible FCF index bmask */
7241 kfree(phba->fcf.fcf_rr_bmask);
7242
James Smartda0436e2009-05-22 14:51:39 -04007243 /* Free the ELS sgl list */
7244 lpfc_free_active_sgl(phba);
James Smart8a9d2e82012-05-09 21:16:12 -04007245 lpfc_free_els_sgl_list(phba);
James Smartf358dd02017-02-12 13:52:34 -08007246 lpfc_free_nvmet_sgl_list(phba);
James Smartda0436e2009-05-22 14:51:39 -04007247
James Smartda0436e2009-05-22 14:51:39 -04007248 /* Free the completion queue EQ event pool */
7249 lpfc_sli4_cq_event_release_all(phba);
7250 lpfc_sli4_cq_event_pool_destroy(phba);
7251
James Smart6d368e52011-05-24 11:44:12 -04007252 /* Release resource identifiers. */
7253 lpfc_sli4_dealloc_resource_identifiers(phba);
7254
James Smartda0436e2009-05-22 14:51:39 -04007255 /* Free the bsmbx region. */
7256 lpfc_destroy_bootstrap_mbox(phba);
7257
7258 /* Free the SLI Layer memory with SLI4 HBAs */
7259 lpfc_mem_free_all(phba);
7260
7261 /* Free the current connect table */
7262 list_for_each_entry_safe(conn_entry, next_conn_entry,
James Smart4d9ab992009-10-02 15:16:39 -04007263 &phba->fcf_conn_rec_list, list) {
7264 list_del_init(&conn_entry->list);
James Smartda0436e2009-05-22 14:51:39 -04007265 kfree(conn_entry);
James Smart4d9ab992009-10-02 15:16:39 -04007266 }
James Smartda0436e2009-05-22 14:51:39 -04007267
7268 return;
7269}
7270
7271/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007272 * lpfc_init_api_table_setup - Set up init api function jump table
James Smart3772a992009-05-22 14:50:54 -04007273 * @phba: The hba struct for which this call is being executed.
7274 * @dev_grp: The HBA PCI-Device group number.
7275 *
7276 * This routine sets up the device INIT interface API function jump table
7277 * in @phba struct.
7278 *
7279 * Returns: 0 - success, -ENODEV - failure.
7280 **/
7281int
7282lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7283{
James Smart84d1b002010-02-12 14:42:33 -05007284 phba->lpfc_hba_init_link = lpfc_hba_init_link;
7285 phba->lpfc_hba_down_link = lpfc_hba_down_link;
James Smart7f860592011-03-11 16:05:52 -05007286 phba->lpfc_selective_reset = lpfc_selective_reset;
James Smart3772a992009-05-22 14:50:54 -04007287 switch (dev_grp) {
7288 case LPFC_PCI_DEV_LP:
7289 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7290 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7291 phba->lpfc_stop_port = lpfc_stop_port_s3;
7292 break;
James Smartda0436e2009-05-22 14:51:39 -04007293 case LPFC_PCI_DEV_OC:
7294 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7295 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7296 phba->lpfc_stop_port = lpfc_stop_port_s4;
7297 break;
James Smart3772a992009-05-22 14:50:54 -04007298 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07007299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -04007300 "1431 Invalid HBA PCI-device group: 0x%x\n",
7301 dev_grp);
7302 return -ENODEV;
James Smart3772a992009-05-22 14:50:54 -04007303 }
7304 return 0;
7305}
7306
7307/**
James Smart3772a992009-05-22 14:50:54 -04007308 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
7309 * @phba: pointer to lpfc hba data structure.
7310 *
7311 * This routine is invoked to set up the driver internal resources after the
7312 * device specific resource setup to support the HBA device it is attached to.
7313 *
7314 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02007315 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -04007316 * other values - error
7317 **/
7318static int
7319lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7320{
7321 int error;
7322
7323 /* Startup the kernel thread for this host adapter. */
7324 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7325 "lpfc_worker_%d", phba->brd_no);
7326 if (IS_ERR(phba->worker_thread)) {
7327 error = PTR_ERR(phba->worker_thread);
7328 return error;
Jamie Wellnitz901a9202006-02-28 19:25:19 -05007329 }
7330
James Smart3772a992009-05-22 14:50:54 -04007331 return 0;
7332}
7333
7334/**
7335 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
7336 * @phba: pointer to lpfc hba data structure.
7337 *
 7338 * This routine is invoked to unset the driver internal resources set up after
 7339 * the device-specific resource setup for supporting the HBA device it is
 7340 * attached to.
7341 **/
7342static void
7343lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7344{
Dick Kennedyf485c182017-09-29 17:34:34 -07007345 if (phba->wq) {
7346 flush_workqueue(phba->wq);
7347 destroy_workqueue(phba->wq);
7348 phba->wq = NULL;
7349 }
7350
James Smart3772a992009-05-22 14:50:54 -04007351 /* Stop kernel worker thread */
James Smart0cdb84e2018-04-09 14:24:26 -07007352 if (phba->worker_thread)
7353 kthread_stop(phba->worker_thread);
James Smart3772a992009-05-22 14:50:54 -04007354}
7355
7356/**
7357 * lpfc_free_iocb_list - Free iocb list.
7358 * @phba: pointer to lpfc hba data structure.
7359 *
7360 * This routine is invoked to free the driver's IOCB list and memory.
7361 **/
James Smart6c621a22017-05-15 15:20:45 -07007362void
James Smart3772a992009-05-22 14:50:54 -04007363lpfc_free_iocb_list(struct lpfc_hba *phba)
7364{
7365 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7366
7367 spin_lock_irq(&phba->hbalock);
7368 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7369 &phba->lpfc_iocb_list, list) {
7370 list_del(&iocbq_entry->list);
7371 kfree(iocbq_entry);
7372 phba->total_iocbq_bufs--;
Jamie Wellnitz901a9202006-02-28 19:25:19 -05007373 }
James Smart3772a992009-05-22 14:50:54 -04007374 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05007375
James Smart3772a992009-05-22 14:50:54 -04007376 return;
7377}
dea31012005-04-17 16:05:31 -05007378
James Smart3772a992009-05-22 14:50:54 -04007379/**
7380 * lpfc_init_iocb_list - Allocate and initialize iocb list.
7381 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01007382 * @iocb_count: number of requested iocbs
James Smart3772a992009-05-22 14:50:54 -04007383 *
 7384 * This routine is invoked to allocate and initialize the driver's IOCB
7385 * list and set up the IOCB tag array accordingly.
7386 *
7387 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02007388 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -04007389 * other values - error
7390 **/
James Smart6c621a22017-05-15 15:20:45 -07007391int
James Smart3772a992009-05-22 14:50:54 -04007392lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7393{
7394 struct lpfc_iocbq *iocbq_entry = NULL;
7395 uint16_t iotag;
7396 int i;
dea31012005-04-17 16:05:31 -05007397
7398 /* Initialize and populate the iocb list per host. */
7399 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
James Smart3772a992009-05-22 14:50:54 -04007400 for (i = 0; i < iocb_count; i++) {
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07007401 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
dea31012005-04-17 16:05:31 -05007402 if (iocbq_entry == NULL) {
7403 printk(KERN_ERR "%s: only allocated %d iocbs of "
7404 "expected %d count. Unloading driver.\n",
James Smarta5f73372019-09-21 20:58:50 -07007405 __func__, i, iocb_count);
dea31012005-04-17 16:05:31 -05007406 goto out_free_iocbq;
7407 }
7408
James Bottomley604a3e32005-10-29 10:28:33 -05007409 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7410 if (iotag == 0) {
James Smart3772a992009-05-22 14:50:54 -04007411 kfree(iocbq_entry);
James Bottomley604a3e32005-10-29 10:28:33 -05007412 printk(KERN_ERR "%s: failed to allocate IOTAG. "
James Smart3772a992009-05-22 14:50:54 -04007413 "Unloading driver.\n", __func__);
James Bottomley604a3e32005-10-29 10:28:33 -05007414 goto out_free_iocbq;
7415 }
James Smart6d368e52011-05-24 11:44:12 -04007416 iocbq_entry->sli4_lxritag = NO_XRI;
James Smart3772a992009-05-22 14:50:54 -04007417 iocbq_entry->sli4_xritag = NO_XRI;
James Smart2e0fef82007-06-17 19:56:36 -05007418
7419 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05007420 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7421 phba->total_iocbq_bufs++;
James Smart2e0fef82007-06-17 19:56:36 -05007422 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05007423 }
7424
James Smart3772a992009-05-22 14:50:54 -04007425 return 0;
7426
7427out_free_iocbq:
7428 lpfc_free_iocb_list(phba);
7429
7430 return -ENOMEM;
7431}
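/*
 * Rough usage sketch for the two routines above (the count value and the
 * error handling shown here are assumptions for illustration, not taken
 * from this file):
 *
 *	if (lpfc_init_iocb_list(phba, iocb_count))	/* e.g. a few thousand */
 *		return -ENOMEM;			/* list already torn down */
 *	...
 *	lpfc_free_iocb_list(phba);		/* on driver teardown */
 *
 * Each entry gets an iotag from lpfc_sli_next_iotag() and is linked onto
 * phba->lpfc_iocb_list under hbalock, so total_iocbq_bufs always matches
 * the list contents; any failure unwinds through lpfc_free_iocb_list().
 */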
7432
7433/**
James Smart8a9d2e82012-05-09 21:16:12 -04007434 * lpfc_free_sgl_list - Free a given sgl list.
James Smartda0436e2009-05-22 14:51:39 -04007435 * @phba: pointer to lpfc hba data structure.
James Smart8a9d2e82012-05-09 21:16:12 -04007436 * @sglq_list: pointer to the head of sgl list.
James Smartda0436e2009-05-22 14:51:39 -04007437 *
James Smart8a9d2e82012-05-09 21:16:12 -04007438 * This routine is invoked to free a given sgl list and its memory.
James Smartda0436e2009-05-22 14:51:39 -04007439 **/
James Smart8a9d2e82012-05-09 21:16:12 -04007440void
7441lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
James Smartda0436e2009-05-22 14:51:39 -04007442{
7443 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
James Smart8a9d2e82012-05-09 21:16:12 -04007444
7445 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7446 list_del(&sglq_entry->list);
7447 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7448 kfree(sglq_entry);
7449 }
7450}
7451
7452/**
7453 * lpfc_free_els_sgl_list - Free els sgl list.
7454 * @phba: pointer to lpfc hba data structure.
7455 *
7456 * This routine is invoked to free the driver's els sgl list and memory.
7457 **/
7458static void
7459lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7460{
James Smartda0436e2009-05-22 14:51:39 -04007461 LIST_HEAD(sglq_list);
James Smartda0436e2009-05-22 14:51:39 -04007462
James Smart8a9d2e82012-05-09 21:16:12 -04007463 /* Retrieve all els sgls from driver list */
James Smarta7892412021-04-11 18:31:15 -07007464 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
James Smart895427b2017-02-12 13:52:30 -08007465 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
James Smarta7892412021-04-11 18:31:15 -07007466 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
James Smartda0436e2009-05-22 14:51:39 -04007467
James Smart8a9d2e82012-05-09 21:16:12 -04007468 /* Now free the sgl list */
7469 lpfc_free_sgl_list(phba, &sglq_list);
James Smartda0436e2009-05-22 14:51:39 -04007470}
7471
7472/**
James Smartf358dd02017-02-12 13:52:34 -08007473 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
7474 * @phba: pointer to lpfc hba data structure.
7475 *
7476 * This routine is invoked to free the driver's nvmet sgl list and memory.
7477 **/
7478static void
7479lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7480{
7481 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7482 LIST_HEAD(sglq_list);
7483
7484 /* Retrieve all nvmet sgls from driver list */
7485 spin_lock_irq(&phba->hbalock);
7486 spin_lock(&phba->sli4_hba.sgl_list_lock);
7487 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7488 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7489 spin_unlock_irq(&phba->hbalock);
7490
7491 /* Now free the sgl list */
7492 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7493 list_del(&sglq_entry->list);
7494 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7495 kfree(sglq_entry);
7496 }
Dick Kennedy4b40d022017-08-23 16:55:38 -07007497
7498 /* Update the nvmet_xri_cnt to reflect no current sgls.
7499 * The next initialization cycle sets the count and allocates
7500 * the sgls over again.
7501 */
7502 phba->sli4_hba.nvmet_xri_cnt = 0;
James Smartf358dd02017-02-12 13:52:34 -08007503}
7504
7505/**
James Smartda0436e2009-05-22 14:51:39 -04007506 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
7507 * @phba: pointer to lpfc hba data structure.
7508 *
7509 * This routine is invoked to allocate the driver's active sgl memory.
7510 * This array will hold the sglq_entry's for active IOs.
7511 **/
7512static int
7513lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7514{
7515 int size;
7516 size = sizeof(struct lpfc_sglq *);
7517 size *= phba->sli4_hba.max_cfg_param.max_xri;
7518
7519 phba->sli4_hba.lpfc_sglq_active_list =
7520 kzalloc(size, GFP_KERNEL);
7521 if (!phba->sli4_hba.lpfc_sglq_active_list)
7522 return -ENOMEM;
7523 return 0;
7524}
7525
7526/**
7527 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
7528 * @phba: pointer to lpfc hba data structure.
7529 *
7530 * This routine is invoked to walk through the array of active sglq entries
7531 * and free all of the resources.
 7532 * This is just a placeholder for now.
7533 **/
7534static void
7535lpfc_free_active_sgl(struct lpfc_hba *phba)
7536{
7537 kfree(phba->sli4_hba.lpfc_sglq_active_list);
7538}
7539
7540/**
7541 * lpfc_init_sgl_list - Allocate and initialize sgl list.
7542 * @phba: pointer to lpfc hba data structure.
7543 *
 7544 * This routine is invoked to allocate and initialize the driver's sgl
 7545 * list and set up the sgl xritag array accordingly.
7546 *
James Smartda0436e2009-05-22 14:51:39 -04007547 **/
James Smart8a9d2e82012-05-09 21:16:12 -04007548static void
James Smartda0436e2009-05-22 14:51:39 -04007549lpfc_init_sgl_list(struct lpfc_hba *phba)
7550{
James Smartda0436e2009-05-22 14:51:39 -04007551 /* Initialize and populate the sglq list per host/VF. */
James Smart895427b2017-02-12 13:52:30 -08007552 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
James Smartda0436e2009-05-22 14:51:39 -04007553 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
James Smartf358dd02017-02-12 13:52:34 -08007554 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
James Smart86c67372017-04-21 16:05:04 -07007555 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
James Smartda0436e2009-05-22 14:51:39 -04007556
James Smart8a9d2e82012-05-09 21:16:12 -04007557 /* els xri-sgl book keeping */
7558 phba->sli4_hba.els_xri_cnt = 0;
James Smartda0436e2009-05-22 14:51:39 -04007559
James Smart895427b2017-02-12 13:52:30 -08007560 /* nvme xri-buffer book keeping */
James Smart5e5b5112019-01-28 11:14:22 -08007561 phba->sli4_hba.io_xri_cnt = 0;
James Smartda0436e2009-05-22 14:51:39 -04007562}
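/*
 * Note on lpfc_init_sgl_list() above: it only primes the sgl list heads and
 * zeroes the els/io xri counters; the sgl entries themselves appear to be
 * allocated later in the setup path, once the usable XRI counts reported by
 * the port are known (see the els_xri_cnt / io_xri_cnt bookkeeping).
 */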
7563
7564/**
7565 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7566 * @phba: pointer to lpfc hba data structure.
7567 *
7568 * This routine is invoked to post rpi header templates to the
James Smart88a2cfb2011-07-22 18:36:33 -04007569 * port for those SLI4 ports that do not support extents. This routine
James Smartda0436e2009-05-22 14:51:39 -04007570 * posts a PAGE_SIZE memory region to the port to hold up to
James Smart88a2cfb2011-07-22 18:36:33 -04007571 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
7572 * and should be called only when interrupts are disabled.
James Smartda0436e2009-05-22 14:51:39 -04007573 *
7574 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02007575 * 0 - successful
James Smart88a2cfb2011-07-22 18:36:33 -04007576 * -ERROR - otherwise.
James Smartda0436e2009-05-22 14:51:39 -04007577 **/
7578int
7579lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7580{
7581 int rc = 0;
James Smartda0436e2009-05-22 14:51:39 -04007582 struct lpfc_rpi_hdr *rpi_hdr;
7583
7584 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
James Smartff78d8f2011-12-13 13:21:35 -05007585 if (!phba->sli4_hba.rpi_hdrs_in_use)
James Smart6d368e52011-05-24 11:44:12 -04007586 return rc;
James Smart6d368e52011-05-24 11:44:12 -04007587 if (phba->sli4_hba.extents_in_use)
7588 return -EIO;
James Smartda0436e2009-05-22 14:51:39 -04007589
7590 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7591 if (!rpi_hdr) {
Dick Kennedy372c1872020-06-30 14:50:00 -07007592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04007593 "0391 Error during rpi post operation\n");
7594 lpfc_sli4_remove_rpis(phba);
7595 rc = -ENODEV;
7596 }
7597
7598 return rc;
7599}
7600
7601/**
7602 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
7603 * @phba: pointer to lpfc hba data structure.
7604 *
7605 * This routine is invoked to allocate a single 4KB memory region to
 7606 * support rpis and store it in the phba. This single region
7607 * provides support for up to 64 rpis. The region is used globally
7608 * by the device.
7609 *
7610 * Returns:
7611 * A valid rpi hdr on success.
7612 * A NULL pointer on any failure.
7613 **/
7614struct lpfc_rpi_hdr *
7615lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7616{
7617 uint16_t rpi_limit, curr_rpi_range;
7618 struct lpfc_dmabuf *dmabuf;
7619 struct lpfc_rpi_hdr *rpi_hdr;
7620
James Smart6d368e52011-05-24 11:44:12 -04007621 /*
7622 * If the SLI4 port supports extents, posting the rpi header isn't
7623 * required. Set the expected maximum count and let the actual value
7624 * get set when extents are fully allocated.
7625 */
7626 if (!phba->sli4_hba.rpi_hdrs_in_use)
7627 return NULL;
7628 if (phba->sli4_hba.extents_in_use)
7629 return NULL;
7630
7631 /* The limit on the logical index is just the max_rpi count. */
James Smart845d9e82017-05-15 15:20:38 -07007632 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
James Smartda0436e2009-05-22 14:51:39 -04007633
7634 spin_lock_irq(&phba->hbalock);
James Smart6d368e52011-05-24 11:44:12 -04007635 /*
7636 * Establish the starting RPI in this header block. The starting
7637 * rpi is normalized to a zero base because the physical rpi is
7638 * port based.
7639 */
James Smart97f2ecf2012-03-01 22:35:23 -05007640 curr_rpi_range = phba->sli4_hba.next_rpi;
James Smartda0436e2009-05-22 14:51:39 -04007641 spin_unlock_irq(&phba->hbalock);
7642
James Smart845d9e82017-05-15 15:20:38 -07007643 /* Reached full RPI range */
7644 if (curr_rpi_range == rpi_limit)
James Smart6d368e52011-05-24 11:44:12 -04007645 return NULL;
James Smart845d9e82017-05-15 15:20:38 -07007646
James Smartda0436e2009-05-22 14:51:39 -04007647 /*
7648 * First allocate the protocol header region for the port. The
7649 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
7650 */
7651 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7652 if (!dmabuf)
7653 return NULL;
7654
Luis Chamberlain750afb02019-01-04 09:23:09 +01007655 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7656 LPFC_HDR_TEMPLATE_SIZE,
7657 &dmabuf->phys, GFP_KERNEL);
James Smartda0436e2009-05-22 14:51:39 -04007658 if (!dmabuf->virt) {
7659 rpi_hdr = NULL;
7660 goto err_free_dmabuf;
7661 }
7662
James Smartda0436e2009-05-22 14:51:39 -04007663 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7664 rpi_hdr = NULL;
7665 goto err_free_coherent;
7666 }
7667
7668 /* Save the rpi header data for cleanup later. */
7669 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7670 if (!rpi_hdr)
7671 goto err_free_coherent;
7672
7673 rpi_hdr->dmabuf = dmabuf;
7674 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7675 rpi_hdr->page_count = 1;
7676 spin_lock_irq(&phba->hbalock);
James Smart6d368e52011-05-24 11:44:12 -04007677
7678 /* The rpi_hdr stores the logical index only. */
7679 rpi_hdr->start_rpi = curr_rpi_range;
James Smart845d9e82017-05-15 15:20:38 -07007680 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
James Smartda0436e2009-05-22 14:51:39 -04007681 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7682
James Smartda0436e2009-05-22 14:51:39 -04007683 spin_unlock_irq(&phba->hbalock);
7684 return rpi_hdr;
7685
7686 err_free_coherent:
7687 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7688 dmabuf->virt, dmabuf->phys);
7689 err_free_dmabuf:
7690 kfree(dmabuf);
7691 return NULL;
7692}
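/*
 * Illustrative summary of the bookkeeping above: each header block that is
 * successfully created covers LPFC_RPI_HDR_COUNT logical rpis starting at
 * the snapshot of next_rpi taken under hbalock, and creation simply stops
 * returning new blocks once that snapshot reaches max_rpi. Ports that use
 * extents, or that do not use rpi headers at all, never allocate a block.
 */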
7693
7694/**
7695 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
7696 * @phba: pointer to lpfc hba data structure.
7697 *
7698 * This routine is invoked to remove all memory resources allocated
James Smart6d368e52011-05-24 11:44:12 -04007699 * to support rpis for SLI4 ports not supporting extents. This routine
7700 * presumes the caller has released all rpis consumed by fabric or port
7701 * logins and is prepared to have the header pages removed.
James Smartda0436e2009-05-22 14:51:39 -04007702 **/
7703void
7704lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7705{
7706 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7707
James Smart6d368e52011-05-24 11:44:12 -04007708 if (!phba->sli4_hba.rpi_hdrs_in_use)
7709 goto exit;
7710
James Smartda0436e2009-05-22 14:51:39 -04007711 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7712 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7713 list_del(&rpi_hdr->list);
7714 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7715 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7716 kfree(rpi_hdr->dmabuf);
7717 kfree(rpi_hdr);
7718 }
James Smart6d368e52011-05-24 11:44:12 -04007719 exit:
7720 /* There are no rpis available to the port now. */
7721 phba->sli4_hba.next_rpi = 0;
James Smartda0436e2009-05-22 14:51:39 -04007722}
7723
7724/**
James Smart3772a992009-05-22 14:50:54 -04007725 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
7726 * @pdev: pointer to pci device data structure.
7727 *
7728 * This routine is invoked to allocate the driver hba data structure for an
7729 * HBA device. If the allocation is successful, the phba reference to the
7730 * PCI device data structure is set.
7731 *
7732 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02007733 * pointer to @phba - successful
James Smart3772a992009-05-22 14:50:54 -04007734 * NULL - error
7735 **/
7736static struct lpfc_hba *
7737lpfc_hba_alloc(struct pci_dev *pdev)
7738{
7739 struct lpfc_hba *phba;
7740
7741 /* Allocate memory for HBA structure */
7742 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7743 if (!phba) {
Jiri Slabye34ccdf2009-07-13 23:25:54 +02007744 dev_err(&pdev->dev, "failed to allocate hba struct\n");
James Smart3772a992009-05-22 14:50:54 -04007745 return NULL;
7746 }
7747
7748 /* Set reference to PCI device in HBA structure */
7749 phba->pcidev = pdev;
7750
7751 /* Assign an unused board number */
7752 phba->brd_no = lpfc_get_instance();
7753 if (phba->brd_no < 0) {
7754 kfree(phba);
7755 return NULL;
7756 }
James Smart65791f12016-07-06 12:35:56 -07007757 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
James Smart3772a992009-05-22 14:50:54 -04007758
James Smart4fede782010-01-26 23:08:55 -05007759 spin_lock_init(&phba->ct_ev_lock);
James Smartf1c3b0f2009-07-19 10:01:32 -04007760 INIT_LIST_HEAD(&phba->ct_ev_waiters);
7761
James Smart3772a992009-05-22 14:50:54 -04007762 return phba;
7763}
7764
7765/**
7766 * lpfc_hba_free - Free driver hba data structure with a device.
7767 * @phba: pointer to lpfc hba data structure.
7768 *
7769 * This routine is invoked to free the driver hba data structure with an
7770 * HBA device.
7771 **/
7772static void
7773lpfc_hba_free(struct lpfc_hba *phba)
7774{
James Smart5e5b5112019-01-28 11:14:22 -08007775 if (phba->sli_rev == LPFC_SLI_REV4)
7776 kfree(phba->sli4_hba.hdwq);
7777
James Smart3772a992009-05-22 14:50:54 -04007778 /* Release the driver assigned board number */
7779 idr_remove(&lpfc_hba_index, phba->brd_no);
7780
James Smart895427b2017-02-12 13:52:30 -08007781 /* Free memory allocated with sli3 rings */
7782 kfree(phba->sli.sli3_ring);
7783 phba->sli.sli3_ring = NULL;
James Smart2a76a282012-08-03 12:35:54 -04007784
James Smart3772a992009-05-22 14:50:54 -04007785 kfree(phba);
7786 return;
7787}
7788
7789/**
7790 * lpfc_create_shost - Create hba physical port with associated scsi host.
7791 * @phba: pointer to lpfc hba data structure.
7792 *
7793 * This routine is invoked to create HBA physical port and associate a SCSI
7794 * host with it.
7795 *
7796 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02007797 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -04007798 * other values - error
7799 **/
7800static int
7801lpfc_create_shost(struct lpfc_hba *phba)
7802{
7803 struct lpfc_vport *vport;
7804 struct Scsi_Host *shost;
7805
7806 /* Initialize HBA FC structure */
dea31012005-04-17 16:05:31 -05007807 phba->fc_edtov = FF_DEF_EDTOV;
7808 phba->fc_ratov = FF_DEF_RATOV;
7809 phba->fc_altov = FF_DEF_ALTOV;
7810 phba->fc_arbtov = FF_DEF_ARBTOV;
7811
James Smartd7c47992010-06-08 18:31:54 -04007812 atomic_set(&phba->sdev_cnt, 0);
James Smart3de2a652007-08-02 11:09:59 -04007813 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
James Smart2e0fef82007-06-17 19:56:36 -05007814 if (!vport)
James Smart3772a992009-05-22 14:50:54 -04007815 return -ENODEV;
James Smart2e0fef82007-06-17 19:56:36 -05007816
7817 shost = lpfc_shost_from_vport(vport);
James Smart2e0fef82007-06-17 19:56:36 -05007818 phba->pport = vport;
James Smart2ea259e2017-02-12 13:52:27 -08007819
James Smartf358dd02017-02-12 13:52:34 -08007820 if (phba->nvmet_support) {
7821 /* Only 1 vport (pport) will support NVME target */
James Smartea85a202019-10-18 14:18:25 -07007822 phba->targetport = NULL;
7823 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7824 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7825 "6076 NVME Target Found\n");
James Smartf358dd02017-02-12 13:52:34 -08007826 }
7827
James Smart858c9f62007-06-17 19:56:39 -05007828 lpfc_debugfs_initialize(vport);
James Smart3772a992009-05-22 14:50:54 -04007829 /* Put reference to SCSI host to driver's device private data */
7830 pci_set_drvdata(phba->pcidev, shost);
James Smart2e0fef82007-06-17 19:56:36 -05007831
James Smart4258e982015-12-16 18:11:58 -05007832 /*
7833 * At this point we are fully registered with PSA. In addition,
7834 * any initial discovery should be completed.
7835 */
7836 vport->load_flag |= FC_ALLOW_FDMI;
James Smart8663cbb2016-03-31 14:12:33 -07007837 if (phba->cfg_enable_SmartSAN ||
7838 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
James Smart4258e982015-12-16 18:11:58 -05007839
7840 /* Setup appropriate attribute masks */
7841 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
James Smart8663cbb2016-03-31 14:12:33 -07007842 if (phba->cfg_enable_SmartSAN)
James Smart4258e982015-12-16 18:11:58 -05007843 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7844 else
7845 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7846 }
James Smart3772a992009-05-22 14:50:54 -04007847 return 0;
7848}
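/*
 * Note on the FDMI setup above: FC_ALLOW_FDMI is always set on the physical
 * port, and when either SmartSAN or FDMI support is configured the HBA mask
 * defaults to LPFC_FDMI2_HBA_ATTR while the port mask is chosen between
 * LPFC_FDMI2_SMART_ATTR (SmartSAN enabled) and LPFC_FDMI2_PORT_ATTR.
 */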
dea31012005-04-17 16:05:31 -05007849
James Smart3772a992009-05-22 14:50:54 -04007850/**
7851 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7852 * @phba: pointer to lpfc hba data structure.
7853 *
7854 * This routine is invoked to destroy HBA physical port and the associated
7855 * SCSI host.
7856 **/
7857static void
7858lpfc_destroy_shost(struct lpfc_hba *phba)
7859{
7860 struct lpfc_vport *vport = phba->pport;
James Smart93996272008-08-24 21:50:30 -04007861
James Smart3772a992009-05-22 14:50:54 -04007862 /* Destroy physical port that associated with the SCSI host */
7863 destroy_port(vport);
7864
7865 return;
7866}
7867
7868/**
7869 * lpfc_setup_bg - Setup Block guard structures and debug areas.
7870 * @phba: pointer to lpfc hba data structure.
7871 * @shost: the shost to be used to detect Block guard settings.
7872 *
7873 * This routine sets up the local Block guard protocol settings for @shost.
7874 * This routine also allocates memory for debugging bg buffers.
7875 **/
7876static void
7877lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7878{
James Smartbbeb79b2012-06-12 13:54:27 -04007879 uint32_t old_mask;
7880 uint32_t old_guard;
7881
James Smartb3b98b72016-10-13 15:06:06 -07007882 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
James Smart3772a992009-05-22 14:50:54 -04007883 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7884 "1478 Registering BlockGuard with the "
7885 "SCSI layer\n");
James Smartbbeb79b2012-06-12 13:54:27 -04007886
James Smartb3b98b72016-10-13 15:06:06 -07007887 old_mask = phba->cfg_prot_mask;
7888 old_guard = phba->cfg_prot_guard;
James Smartbbeb79b2012-06-12 13:54:27 -04007889
7890 /* Only allow supported values */
James Smartb3b98b72016-10-13 15:06:06 -07007891 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
James Smartbbeb79b2012-06-12 13:54:27 -04007892 SHOST_DIX_TYPE0_PROTECTION |
7893 SHOST_DIX_TYPE1_PROTECTION);
James Smartb3b98b72016-10-13 15:06:06 -07007894 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7895 SHOST_DIX_GUARD_CRC);
James Smartbbeb79b2012-06-12 13:54:27 -04007896
7897 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
James Smartb3b98b72016-10-13 15:06:06 -07007898 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7899 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
James Smartbbeb79b2012-06-12 13:54:27 -04007900
James Smartb3b98b72016-10-13 15:06:06 -07007901 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7902 if ((old_mask != phba->cfg_prot_mask) ||
7903 (old_guard != phba->cfg_prot_guard))
Dick Kennedy372c1872020-06-30 14:50:00 -07007904 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartbbeb79b2012-06-12 13:54:27 -04007905 "1475 Registering BlockGuard with the "
7906 "SCSI layer: mask %d guard %d\n",
James Smartb3b98b72016-10-13 15:06:06 -07007907 phba->cfg_prot_mask,
7908 phba->cfg_prot_guard);
James Smartbbeb79b2012-06-12 13:54:27 -04007909
James Smartb3b98b72016-10-13 15:06:06 -07007910 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7911 scsi_host_set_guard(shost, phba->cfg_prot_guard);
James Smartbbeb79b2012-06-12 13:54:27 -04007912 } else
Dick Kennedy372c1872020-06-30 14:50:00 -07007913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartbbeb79b2012-06-12 13:54:27 -04007914 "1479 Not Registering BlockGuard with the SCSI "
7915 "layer, Bad protection parameters: %d %d\n",
7916 old_mask, old_guard);
James Smart98c9ea52007-10-27 13:37:33 -04007917 }
James Smart3772a992009-05-22 14:50:54 -04007918}
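/*
 * Example of the filtering above (values are illustrative): if the module
 * parameters request only SHOST_DIX_TYPE1_PROTECTION with an IP guard, the
 * routine also turns on SHOST_DIF_TYPE1_PROTECTION, since the AST1/C1
 * profiles are end to end, before registering mask and guard with the SCSI
 * layer. Any bits outside the supported DIF type 1 / DIX type 0,1 masks and
 * IP/CRC guards are dropped, and a change from the requested values is
 * logged.
 */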
7919
7920/**
7921 * lpfc_post_init_setup - Perform necessary device post initialization setup.
7922 * @phba: pointer to lpfc hba data structure.
7923 *
7924 * This routine is invoked to perform all the necessary post initialization
7925 * setup for the device.
7926 **/
7927static void
7928lpfc_post_init_setup(struct lpfc_hba *phba)
7929{
7930 struct Scsi_Host *shost;
7931 struct lpfc_adapter_event_header adapter_event;
7932
7933 /* Get the default values for Model Name and Description */
7934 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7935
7936 /*
7937 * hba setup may have changed the hba_queue_depth so we need to
7938 * adjust the value of can_queue.
7939 */
7940 shost = pci_get_drvdata(phba->pcidev);
7941 shost->can_queue = phba->cfg_hba_queue_depth - 10;
James Smart858c9f62007-06-17 19:56:39 -05007942
7943 lpfc_host_attrib_init(shost);
7944
James Smart2e0fef82007-06-17 19:56:36 -05007945 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7946 spin_lock_irq(shost->host_lock);
7947 lpfc_poll_start_timer(phba);
7948 spin_unlock_irq(shost->host_lock);
7949 }
James Smart8f6d98d2006-08-01 07:34:00 -04007950
James Smart93996272008-08-24 21:50:30 -04007951 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7952 "0428 Perform SCSI scan\n");
James Smartea2151b2008-09-07 11:52:10 -04007953 /* Send board arrival event to upper layer */
7954 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7955 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7956 fc_host_post_vendor_event(shost, fc_get_event_number(),
James Smart3772a992009-05-22 14:50:54 -04007957 sizeof(adapter_event),
7958 (char *) &adapter_event,
7959 LPFC_NL_VENDOR_ID);
7960 return;
7961}
7962
7963/**
7964 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7965 * @phba: pointer to lpfc hba data structure.
7966 *
7967 * This routine is invoked to set up the PCI device memory space for device
7968 * with SLI-3 interface spec.
7969 *
7970 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02007971 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -04007972 * other values - error
7973 **/
7974static int
7975lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7976{
Christoph Hellwigf30e1bf2018-10-18 15:10:21 +02007977 struct pci_dev *pdev = phba->pcidev;
James Smart3772a992009-05-22 14:50:54 -04007978 unsigned long bar0map_len, bar2map_len;
7979 int i, hbq_count;
7980 void *ptr;
Hannes Reinecke56de8352019-02-18 08:34:19 +01007981 int error;
James Smart3772a992009-05-22 14:50:54 -04007982
Christoph Hellwigf30e1bf2018-10-18 15:10:21 +02007983 if (!pdev)
Hannes Reinecke56de8352019-02-18 08:34:19 +01007984 return -ENODEV;
James Smart3772a992009-05-22 14:50:54 -04007985
7986 /* Set the device DMA mask size */
Hannes Reinecke56de8352019-02-18 08:34:19 +01007987 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7988 if (error)
7989 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7990 if (error)
Christoph Hellwigf30e1bf2018-10-18 15:10:21 +02007991 return error;
Hannes Reinecke56de8352019-02-18 08:34:19 +01007992 error = -ENODEV;
James Smart3772a992009-05-22 14:50:54 -04007993
7994 /* Get the bus address of Bar0 and Bar2 and the number of bytes
7995 * required by each mapping.
7996 */
7997 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7998 bar0map_len = pci_resource_len(pdev, 0);
7999
8000 phba->pci_bar2_map = pci_resource_start(pdev, 2);
8001 bar2map_len = pci_resource_len(pdev, 2);
8002
8003 /* Map HBA SLIM to a kernel virtual address. */
8004 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
8005 if (!phba->slim_memmap_p) {
8006 dev_printk(KERN_ERR, &pdev->dev,
8007 "ioremap failed for SLIM memory.\n");
8008 goto out;
8009 }
8010
8011 /* Map HBA Control Registers to a kernel virtual address. */
8012 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
8013 if (!phba->ctrl_regs_memmap_p) {
8014 dev_printk(KERN_ERR, &pdev->dev,
8015 "ioremap failed for HBA control registers.\n");
8016 goto out_iounmap_slim;
8017 }
8018
8019 /* Allocate memory for SLI-2 structures */
Luis Chamberlain750afb02019-01-04 09:23:09 +01008020 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8021 &phba->slim2p.phys, GFP_KERNEL);
James Smart3772a992009-05-22 14:50:54 -04008022 if (!phba->slim2p.virt)
8023 goto out_iounmap;
8024
James Smart3772a992009-05-22 14:50:54 -04008025 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
James Smart7a470272010-03-15 11:25:20 -04008026 phba->mbox_ext = (phba->slim2p.virt +
8027 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
James Smart3772a992009-05-22 14:50:54 -04008028 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
8029 phba->IOCBs = (phba->slim2p.virt +
8030 offsetof(struct lpfc_sli2_slim, IOCBs));
8031
8032 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
8033 lpfc_sli_hbq_size(),
8034 &phba->hbqslimp.phys,
8035 GFP_KERNEL);
8036 if (!phba->hbqslimp.virt)
8037 goto out_free_slim;
8038
8039 hbq_count = lpfc_sli_hbq_count();
8040 ptr = phba->hbqslimp.virt;
8041 for (i = 0; i < hbq_count; ++i) {
8042 phba->hbqs[i].hbq_virt = ptr;
8043 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
8044 ptr += (lpfc_hbq_defs[i]->entry_count *
8045 sizeof(struct lpfc_hbq_entry));
8046 }
8047 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
8048 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
8049
8050 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
8051
James Smart3772a992009-05-22 14:50:54 -04008052 phba->MBslimaddr = phba->slim_memmap_p;
8053 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
8054 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
8055 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
8056 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
James Smartea2151b2008-09-07 11:52:10 -04008057
dea31012005-04-17 16:05:31 -05008058 return 0;
8059
dea31012005-04-17 16:05:31 -05008060out_free_slim:
James Smart34b02dc2008-08-24 21:49:55 -04008061 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8062 phba->slim2p.virt, phba->slim2p.phys);
dea31012005-04-17 16:05:31 -05008063out_iounmap:
8064 iounmap(phba->ctrl_regs_memmap_p);
Jamie Wellnitz901a9202006-02-28 19:25:19 -05008065out_iounmap_slim:
dea31012005-04-17 16:05:31 -05008066 iounmap(phba->slim_memmap_p);
dea31012005-04-17 16:05:31 -05008067out:
8068 return error;
8069}
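/*
 * Layout note for the coherent allocations above: mbox, mbox_ext, pcb and
 * IOCBs are all carved out of the single slim2p allocation via offsetof()
 * into struct lpfc_sli2_slim, while the HBQ slim gets its own allocation
 * sized by lpfc_sli_hbq_size() and is then partitioned per HBQ by
 * entry_count * sizeof(struct lpfc_hbq_entry).
 */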
8070
James Smarte59058c2008-08-24 21:49:00 -04008071/**
James Smart3772a992009-05-22 14:50:54 -04008072 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
8073 * @phba: pointer to lpfc hba data structure.
8074 *
8075 * This routine is invoked to unset the PCI device memory space for device
8076 * with SLI-3 interface spec.
8077 **/
8078static void
8079lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
8080{
8081 struct pci_dev *pdev;
8082
8083 /* Obtain PCI device reference */
8084 if (!phba->pcidev)
8085 return;
8086 else
8087 pdev = phba->pcidev;
8088
8089 /* Free coherent DMA memory allocated */
8090 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8091 phba->hbqslimp.virt, phba->hbqslimp.phys);
8092 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8093 phba->slim2p.virt, phba->slim2p.phys);
8094
8095 /* I/O memory unmap */
8096 iounmap(phba->ctrl_regs_memmap_p);
8097 iounmap(phba->slim_memmap_p);
8098
8099 return;
8100}
8101
8102/**
James Smartda0436e2009-05-22 14:51:39 -04008103 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
8104 * @phba: pointer to lpfc hba data structure.
8105 *
8106 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
8107 * done and check status.
8108 *
8109 * Return 0 if successful, otherwise -ENODEV.
8110 **/
8111int
8112lpfc_sli4_post_status_check(struct lpfc_hba *phba)
8113{
James Smart2fcee4b2010-12-15 17:57:46 -05008114 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
8115 struct lpfc_register reg_data;
8116 int i, port_error = 0;
8117 uint32_t if_type;
James Smartda0436e2009-05-22 14:51:39 -04008118
James Smart9940b972011-03-11 16:06:12 -05008119 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
8120 memset(&reg_data, 0, sizeof(reg_data));
James Smart2fcee4b2010-12-15 17:57:46 -05008121 if (!phba->sli4_hba.PSMPHRregaddr)
James Smartda0436e2009-05-22 14:51:39 -04008122 return -ENODEV;
8123
James Smartda0436e2009-05-22 14:51:39 -04008124 /* Wait up to 30 seconds for the SLI Port POST done and ready */
8125 for (i = 0; i < 3000; i++) {
James Smart9940b972011-03-11 16:06:12 -05008126 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
8127 &portsmphr_reg.word0) ||
8128 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
James Smart2fcee4b2010-12-15 17:57:46 -05008129 /* Port has a fatal POST error, break out */
James Smartda0436e2009-05-22 14:51:39 -04008130 port_error = -ENODEV;
8131 break;
8132 }
James Smart2fcee4b2010-12-15 17:57:46 -05008133 if (LPFC_POST_STAGE_PORT_READY ==
8134 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
James Smartda0436e2009-05-22 14:51:39 -04008135 break;
James Smartda0436e2009-05-22 14:51:39 -04008136 msleep(10);
8137 }
8138
James Smart2fcee4b2010-12-15 17:57:46 -05008139 /*
8140 * If there was a port error during POST, then don't proceed with
8141 * other register reads as the data may not be valid. Just exit.
8142 */
8143 if (port_error) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008144 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -05008145 "1408 Port Failed POST - portsmphr=0x%x, "
8146 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
8147 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
8148 portsmphr_reg.word0,
8149 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
8150 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
8151 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
8152 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
8153 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
8154 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
8155 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
8156 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
8157 } else {
James Smart28baac72010-02-12 14:42:03 -05008158 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart2fcee4b2010-12-15 17:57:46 -05008159 "2534 Device Info: SLIFamily=0x%x, "
8160 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
8161 "SLIHint_2=0x%x, FT=0x%x\n",
James Smart28baac72010-02-12 14:42:03 -05008162 bf_get(lpfc_sli_intf_sli_family,
8163 &phba->sli4_hba.sli_intf),
8164 bf_get(lpfc_sli_intf_slirev,
8165 &phba->sli4_hba.sli_intf),
James Smart085c6472010-11-20 23:11:37 -05008166 bf_get(lpfc_sli_intf_if_type,
James Smart28baac72010-02-12 14:42:03 -05008167 &phba->sli4_hba.sli_intf),
James Smart085c6472010-11-20 23:11:37 -05008168 bf_get(lpfc_sli_intf_sli_hint1,
8169 &phba->sli4_hba.sli_intf),
8170 bf_get(lpfc_sli_intf_sli_hint2,
8171 &phba->sli4_hba.sli_intf),
8172 bf_get(lpfc_sli_intf_func_type,
James Smart28baac72010-02-12 14:42:03 -05008173 &phba->sli4_hba.sli_intf));
James Smart2fcee4b2010-12-15 17:57:46 -05008174 /*
8175 * Check for other Port errors during the initialization
8176 * process. Fail the load if the port did not come up
8177 * correctly.
8178 */
8179 if_type = bf_get(lpfc_sli_intf_if_type,
8180 &phba->sli4_hba.sli_intf);
8181 switch (if_type) {
8182 case LPFC_SLI_INTF_IF_TYPE_0:
8183 phba->sli4_hba.ue_mask_lo =
8184 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
8185 phba->sli4_hba.ue_mask_hi =
8186 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
8187 uerrlo_reg.word0 =
8188 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
8189 uerrhi_reg.word0 =
8190 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
8191 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
8192 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008193 lpfc_printf_log(phba, KERN_ERR,
8194 LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -05008195 "1422 Unrecoverable Error "
8196 "Detected during POST "
8197 "uerr_lo_reg=0x%x, "
8198 "uerr_hi_reg=0x%x, "
8199 "ue_mask_lo_reg=0x%x, "
8200 "ue_mask_hi_reg=0x%x\n",
8201 uerrlo_reg.word0,
8202 uerrhi_reg.word0,
8203 phba->sli4_hba.ue_mask_lo,
8204 phba->sli4_hba.ue_mask_hi);
8205 port_error = -ENODEV;
8206 }
8207 break;
8208 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart27d6ac02018-02-22 08:18:42 -08008209 case LPFC_SLI_INTF_IF_TYPE_6:
James Smart2fcee4b2010-12-15 17:57:46 -05008210 /* Final checks. The port status should be clean. */
James Smart9940b972011-03-11 16:06:12 -05008211 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8212 &reg_data.word0) ||
James Smart05580562011-05-24 11:40:48 -04008213 (bf_get(lpfc_sliport_status_err, &reg_data) &&
8214 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
James Smart2fcee4b2010-12-15 17:57:46 -05008215 phba->work_status[0] =
8216 readl(phba->sli4_hba.u.if_type2.
8217 ERR1regaddr);
8218 phba->work_status[1] =
8219 readl(phba->sli4_hba.u.if_type2.
8220 ERR2regaddr);
Dick Kennedy372c1872020-06-30 14:50:00 -07008221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart8fcb8ac2012-03-01 22:35:58 -05008222 "2888 Unrecoverable port error "
8223 "following POST: port status reg "
8224 "0x%x, port_smphr reg 0x%x, "
James Smart2fcee4b2010-12-15 17:57:46 -05008225 "error 1=0x%x, error 2=0x%x\n",
8226 reg_data.word0,
8227 portsmphr_reg.word0,
8228 phba->work_status[0],
8229 phba->work_status[1]);
8230 port_error = -ENODEV;
8231 }
8232 break;
8233 case LPFC_SLI_INTF_IF_TYPE_1:
8234 default:
8235 break;
8236 }
James Smart28baac72010-02-12 14:42:03 -05008237 }
James Smartda0436e2009-05-22 14:51:39 -04008238 return port_error;
8239}
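/*
 * Timing note: the POST wait loop above polls the port semaphore register
 * every 10 ms for at most 3000 iterations, which is where the "up to 30
 * seconds" figure in the loop comment comes from.
 */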
8240
8241/**
8242 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
8243 * @phba: pointer to lpfc hba data structure.
James Smart2fcee4b2010-12-15 17:57:46 -05008244 * @if_type: The SLI4 interface type getting configured.
James Smartda0436e2009-05-22 14:51:39 -04008245 *
8246 * This routine is invoked to set up SLI4 BAR0 PCI config space register
8247 * memory map.
8248 **/
8249static void
James Smart2fcee4b2010-12-15 17:57:46 -05008250lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
James Smartda0436e2009-05-22 14:51:39 -04008251{
James Smart2fcee4b2010-12-15 17:57:46 -05008252 switch (if_type) {
8253 case LPFC_SLI_INTF_IF_TYPE_0:
8254 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8255 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8256 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8257 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8258 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8259 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8260 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8261 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8262 phba->sli4_hba.SLIINTFregaddr =
8263 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8264 break;
8265 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart0cf07f842017-06-01 21:07:10 -07008266 phba->sli4_hba.u.if_type2.EQDregaddr =
8267 phba->sli4_hba.conf_regs_memmap_p +
8268 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
James Smart2fcee4b2010-12-15 17:57:46 -05008269 phba->sli4_hba.u.if_type2.ERR1regaddr =
James Smart88a2cfb2011-07-22 18:36:33 -04008270 phba->sli4_hba.conf_regs_memmap_p +
8271 LPFC_CTL_PORT_ER1_OFFSET;
James Smart2fcee4b2010-12-15 17:57:46 -05008272 phba->sli4_hba.u.if_type2.ERR2regaddr =
James Smart88a2cfb2011-07-22 18:36:33 -04008273 phba->sli4_hba.conf_regs_memmap_p +
8274 LPFC_CTL_PORT_ER2_OFFSET;
James Smart2fcee4b2010-12-15 17:57:46 -05008275 phba->sli4_hba.u.if_type2.CTRLregaddr =
James Smart88a2cfb2011-07-22 18:36:33 -04008276 phba->sli4_hba.conf_regs_memmap_p +
8277 LPFC_CTL_PORT_CTL_OFFSET;
James Smart2fcee4b2010-12-15 17:57:46 -05008278 phba->sli4_hba.u.if_type2.STATUSregaddr =
James Smart88a2cfb2011-07-22 18:36:33 -04008279 phba->sli4_hba.conf_regs_memmap_p +
8280 LPFC_CTL_PORT_STA_OFFSET;
James Smart2fcee4b2010-12-15 17:57:46 -05008281 phba->sli4_hba.SLIINTFregaddr =
8282 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8283 phba->sli4_hba.PSMPHRregaddr =
James Smart88a2cfb2011-07-22 18:36:33 -04008284 phba->sli4_hba.conf_regs_memmap_p +
8285 LPFC_CTL_PORT_SEM_OFFSET;
James Smart2fcee4b2010-12-15 17:57:46 -05008286 phba->sli4_hba.RQDBregaddr =
James Smart962bc512013-01-03 15:44:00 -05008287 phba->sli4_hba.conf_regs_memmap_p +
8288 LPFC_ULP0_RQ_DOORBELL;
James Smart2fcee4b2010-12-15 17:57:46 -05008289 phba->sli4_hba.WQDBregaddr =
James Smart962bc512013-01-03 15:44:00 -05008290 phba->sli4_hba.conf_regs_memmap_p +
8291 LPFC_ULP0_WQ_DOORBELL;
James Smart9dd35422018-02-22 08:18:41 -08008292 phba->sli4_hba.CQDBregaddr =
James Smart2fcee4b2010-12-15 17:57:46 -05008293 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
James Smart9dd35422018-02-22 08:18:41 -08008294 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
James Smart2fcee4b2010-12-15 17:57:46 -05008295 phba->sli4_hba.MQDBregaddr =
8296 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8297 phba->sli4_hba.BMBXregaddr =
8298 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8299 break;
James Smart27d6ac02018-02-22 08:18:42 -08008300 case LPFC_SLI_INTF_IF_TYPE_6:
8301 phba->sli4_hba.u.if_type2.EQDregaddr =
8302 phba->sli4_hba.conf_regs_memmap_p +
8303 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8304 phba->sli4_hba.u.if_type2.ERR1regaddr =
8305 phba->sli4_hba.conf_regs_memmap_p +
8306 LPFC_CTL_PORT_ER1_OFFSET;
8307 phba->sli4_hba.u.if_type2.ERR2regaddr =
8308 phba->sli4_hba.conf_regs_memmap_p +
8309 LPFC_CTL_PORT_ER2_OFFSET;
8310 phba->sli4_hba.u.if_type2.CTRLregaddr =
8311 phba->sli4_hba.conf_regs_memmap_p +
8312 LPFC_CTL_PORT_CTL_OFFSET;
8313 phba->sli4_hba.u.if_type2.STATUSregaddr =
8314 phba->sli4_hba.conf_regs_memmap_p +
8315 LPFC_CTL_PORT_STA_OFFSET;
8316 phba->sli4_hba.PSMPHRregaddr =
8317 phba->sli4_hba.conf_regs_memmap_p +
8318 LPFC_CTL_PORT_SEM_OFFSET;
8319 phba->sli4_hba.BMBXregaddr =
8320 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8321 break;
James Smart2fcee4b2010-12-15 17:57:46 -05008322 case LPFC_SLI_INTF_IF_TYPE_1:
8323 default:
8324 dev_printk(KERN_ERR, &phba->pcidev->dev,
8325 "FATAL - unsupported SLI4 interface type - %d\n",
8326 if_type);
8327 break;
8328 }
James Smartda0436e2009-05-22 14:51:39 -04008329}
8330
8331/**
8332 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8333 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01008334 * @if_type: sli if type to operate on.
James Smartda0436e2009-05-22 14:51:39 -04008335 *
James Smart27d6ac02018-02-22 08:18:42 -08008336 * This routine is invoked to set up SLI4 BAR1 register memory map.
James Smartda0436e2009-05-22 14:51:39 -04008337 **/
8338static void
James Smart27d6ac02018-02-22 08:18:42 -08008339lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
James Smartda0436e2009-05-22 14:51:39 -04008340{
James Smart27d6ac02018-02-22 08:18:42 -08008341 switch (if_type) {
8342 case LPFC_SLI_INTF_IF_TYPE_0:
8343 phba->sli4_hba.PSMPHRregaddr =
8344 phba->sli4_hba.ctrl_regs_memmap_p +
8345 LPFC_SLIPORT_IF0_SMPHR;
8346 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8347 LPFC_HST_ISR0;
8348 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8349 LPFC_HST_IMR0;
8350 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8351 LPFC_HST_ISCR0;
8352 break;
8353 case LPFC_SLI_INTF_IF_TYPE_6:
8354 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8355 LPFC_IF6_RQ_DOORBELL;
8356 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8357 LPFC_IF6_WQ_DOORBELL;
8358 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8359 LPFC_IF6_CQ_DOORBELL;
8360 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8361 LPFC_IF6_EQ_DOORBELL;
8362 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8363 LPFC_IF6_MQ_DOORBELL;
8364 break;
8365 case LPFC_SLI_INTF_IF_TYPE_2:
8366 case LPFC_SLI_INTF_IF_TYPE_1:
8367 default:
8368 dev_err(&phba->pcidev->dev,
8369 "FATAL - unsupported SLI4 interface type - %d\n",
8370 if_type);
8371 break;
8372 }
James Smartda0436e2009-05-22 14:51:39 -04008373}
8374
8375/**
8376 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
8377 * @phba: pointer to lpfc hba data structure.
8378 * @vf: virtual function number
8379 *
8380 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8381 * based on the given viftual function number, @vf.
8382 *
8383 * Return 0 if successful, otherwise -ENODEV.
8384 **/
8385static int
8386lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8387{
8388 if (vf > LPFC_VIR_FUNC_MAX)
8389 return -ENODEV;
8390
8391 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
James Smart962bc512013-01-03 15:44:00 -05008392 vf * LPFC_VFR_PAGE_SIZE +
8393 LPFC_ULP0_RQ_DOORBELL);
James Smartda0436e2009-05-22 14:51:39 -04008394 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
James Smart962bc512013-01-03 15:44:00 -05008395 vf * LPFC_VFR_PAGE_SIZE +
8396 LPFC_ULP0_WQ_DOORBELL);
James Smart9dd35422018-02-22 08:18:41 -08008397 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8398 vf * LPFC_VFR_PAGE_SIZE +
8399 LPFC_EQCQ_DOORBELL);
8400 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
James Smartda0436e2009-05-22 14:51:39 -04008401 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8402 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8403 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8404 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8405 return 0;
8406}
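/*
 * Worked example for the BAR2 mapping above (the vf value is illustrative):
 * for vf == 2, the RQ doorbell lands at
 *
 *	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
 *
 * i.e. every virtual function owns one LPFC_VFR_PAGE_SIZE window with the
 * RQ/WQ/EQ-CQ/MQ/BMBX doorbells at fixed offsets inside it.
 */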
8407
8408/**
8409 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8410 * @phba: pointer to lpfc hba data structure.
8411 *
8412 * This routine is invoked to create the bootstrap mailbox
8413 * region consistent with the SLI-4 interface spec. This
8414 * routine allocates all memory necessary to communicate
8415 * mailbox commands to the port and sets up all alignment
8416 * needs. No locks are expected to be held when calling
8417 * this routine.
8418 *
8419 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02008420 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -04008421 * -ENOMEM - could not allocated memory.
James Smartda0436e2009-05-22 14:51:39 -04008422 **/
8423static int
8424lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8425{
8426 uint32_t bmbx_size;
8427 struct lpfc_dmabuf *dmabuf;
8428 struct dma_address *dma_address;
8429 uint32_t pa_addr;
8430 uint64_t phys_addr;
8431
8432 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8433 if (!dmabuf)
8434 return -ENOMEM;
8435
8436 /*
 8437 * The bootstrap mailbox region is composed of 2 parts
8438 * plus an alignment restriction of 16 bytes.
8439 */
8440 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
Luis Chamberlain750afb02019-01-04 09:23:09 +01008441 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8442 &dmabuf->phys, GFP_KERNEL);
James Smartda0436e2009-05-22 14:51:39 -04008443 if (!dmabuf->virt) {
8444 kfree(dmabuf);
8445 return -ENOMEM;
8446 }
James Smartda0436e2009-05-22 14:51:39 -04008447
8448 /*
8449 * Initialize the bootstrap mailbox pointers now so that the register
8450 * operations are simple later. The mailbox dma address is required
8451 * to be 16-byte aligned. Also align the virtual memory as each
 8452 * mailbox is copied into the bmbx mailbox region before issuing the
8453 * command to the port.
8454 */
8455 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8456 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8457
8458 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8459 LPFC_ALIGN_16_BYTE);
8460 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8461 LPFC_ALIGN_16_BYTE);
8462
8463 /*
8464 * Set the high and low physical addresses now. The SLI4 alignment
8465 * requirement is 16 bytes and the mailbox is posted to the port
8466 * as two 30-bit addresses. The other data is a bit marking whether
8467 * the 30-bit address is the high or low address.
8468 * Upcast bmbx aphys to 64bits so shift instruction compiles
8469 * clean on 32 bit machines.
8470 */
8471 dma_address = &phba->sli4_hba.bmbx.dma_address;
8472 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8473 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8474 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8475 LPFC_BMBX_BIT1_ADDR_HI);
8476
8477 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8478 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8479 LPFC_BMBX_BIT1_ADDR_LO);
8480 return 0;
8481}
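/*
 * Sketch of the address split above: with aphys 16-byte aligned, addr_lo
 * carries physical address bits 33:4 and addr_hi carries bits 63:34; each
 * 30-bit payload is shifted left by two and tagged with the BIT1_ADDR_LO /
 * BIT1_ADDR_HI marker so the port can tell which half it received.
 */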
8482
8483/**
8484 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8485 * @phba: pointer to lpfc hba data structure.
8486 *
8487 * This routine is invoked to teardown the bootstrap mailbox
8488 * region and release all host resources. This routine requires
 8489 * the caller to ensure all mailbox commands have been recovered, that no
 8490 * additional mailbox commands are sent, and that interrupts are disabled
8491 * before calling this routine.
8492 *
8493 **/
8494static void
8495lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8496{
8497 dma_free_coherent(&phba->pcidev->dev,
8498 phba->sli4_hba.bmbx.bmbx_size,
8499 phba->sli4_hba.bmbx.dmabuf->virt,
8500 phba->sli4_hba.bmbx.dmabuf->phys);
8501
8502 kfree(phba->sli4_hba.bmbx.dmabuf);
8503 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8504}
8505
James Smart83c6cb12019-10-18 14:18:30 -07008506static const char * const lpfc_topo_to_str[] = {
8507 "Loop then P2P",
8508 "Loopback",
8509 "P2P Only",
8510 "Unsupported",
8511 "Loop Only",
8512 "Unsupported",
8513 "P2P then Loop",
8514};
8515
Lee Jonesfe614ac2020-07-23 13:24:22 +01008516#define LINK_FLAGS_DEF 0x0
8517#define LINK_FLAGS_P2P 0x1
8518#define LINK_FLAGS_LOOP 0x2
James Smart83c6cb12019-10-18 14:18:30 -07008519/**
8520 * lpfc_map_topology - Map the topology read from READ_CONFIG
8521 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +01008522 * @rd_config: pointer to read config data
James Smart83c6cb12019-10-18 14:18:30 -07008523 *
8524 * This routine is invoked to map the topology values as read
8525 * from the read config mailbox command. If the persistent
8526 * topology feature is supported, the firmware will provide the
8527 * saved topology information to be used in INIT_LINK
James Smart83c6cb12019-10-18 14:18:30 -07008528 **/
James Smart83c6cb12019-10-18 14:18:30 -07008529static void
8530lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8531{
8532 u8 ptv, tf, pt;
8533
8534 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8535 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8536 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8537
8538 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8539 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
8540 ptv, tf, pt);
8541 if (!ptv) {
8542 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8543 "2019 FW does not support persistent topology "
8544 "Using driver parameter defined value [%s]",
8545 lpfc_topo_to_str[phba->cfg_topology]);
8546 return;
8547 }
8548 /* FW supports persistent topology - override module parameter value */
8549 phba->hba_flag |= HBA_PERSISTENT_TOPO;
8550 switch (phba->pcidev->device) {
8551 case PCI_DEVICE_ID_LANCER_G7_FC:
James Smart83c6cb12019-10-18 14:18:30 -07008552 case PCI_DEVICE_ID_LANCER_G6_FC:
8553 if (!tf) {
8554 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8555 ? FLAGS_TOPOLOGY_MODE_LOOP
8556 : FLAGS_TOPOLOGY_MODE_PT_PT);
8557 } else {
8558 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8559 }
8560 break;
8561 default: /* G5 */
8562 if (tf) {
8563 /* If topology failover set - pt is '0' or '1' */
8564 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8565 FLAGS_TOPOLOGY_MODE_LOOP_PT);
8566 } else {
8567 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8568 ? FLAGS_TOPOLOGY_MODE_PT_PT
8569 : FLAGS_TOPOLOGY_MODE_LOOP);
8570 }
8571 break;
8572 }
8573 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8574 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8575 "2020 Using persistent topology value [%s]",
8576 lpfc_topo_to_str[phba->cfg_topology]);
8577 } else {
8578 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8579 "2021 Invalid topology values from FW "
8580 "Using driver parameter defined value [%s]",
8581 lpfc_topo_to_str[phba->cfg_topology]);
8582 }
8583}
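/*
 * Decision summary for lpfc_map_topology() above, as read from the code:
 *
 *	ptv == 0                  : keep the driver-parameter topology
 *	ptv, G6/G7,  tf == 0      : pt selects Loop Only vs P2P Only
 *	ptv, G6/G7,  tf == 1      : persistent topology flag is cleared
 *	ptv, others, tf == 1      : pt selects P2P then Loop vs Loop then P2P
 *	ptv, others, tf == 0      : pt selects P2P Only vs Loop Only
 */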
8584
James Smartda0436e2009-05-22 14:51:39 -04008585/**
8586 * lpfc_sli4_read_config - Get the config parameters.
8587 * @phba: pointer to lpfc hba data structure.
8588 *
8589 * This routine is invoked to read the configuration parameters from the HBA.
8590 * The configuration parameters are used to set the base and maximum values
 8591 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
8592 * allocation for the port.
8593 *
8594 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02008595 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008596 * -ENOMEM - No available memory
James Smartd439d282010-09-29 11:18:45 -04008597 * -EIO - The mailbox failed to complete successfully.
James Smartda0436e2009-05-22 14:51:39 -04008598 **/
James Smartff78d8f2011-12-13 13:21:35 -05008599int
James Smartda0436e2009-05-22 14:51:39 -04008600lpfc_sli4_read_config(struct lpfc_hba *phba)
8601{
8602 LPFC_MBOXQ_t *pmb;
8603 struct lpfc_mbx_read_config *rd_config;
James Smart912e3ac2011-05-24 11:42:11 -04008604 union lpfc_sli4_cfg_shdr *shdr;
8605 uint32_t shdr_status, shdr_add_status;
8606 struct lpfc_mbx_get_func_cfg *get_func_cfg;
8607 struct lpfc_rsrc_desc_fcfcoe *desc;
James Smart8aa134a2012-08-14 14:25:29 -04008608 char *pdesc_0;
James Smartc6918162016-10-13 15:06:16 -07008609 uint16_t forced_link_speed;
James Smart6a828b02019-01-28 11:14:31 -08008610 uint32_t if_type, qmin;
James Smart8aa134a2012-08-14 14:25:29 -04008611 int length, i, rc = 0, rc2;
James Smartda0436e2009-05-22 14:51:39 -04008612
8613 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8614 if (!pmb) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04008616 "2011 Unable to allocate memory for issuing "
8617 "SLI_CONFIG_SPECIAL mailbox command\n");
8618 return -ENOMEM;
8619 }
8620
8621 lpfc_read_config(phba, pmb);
8622
8623 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8624 if (rc != MBX_SUCCESS) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008625 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8626 "2012 Mailbox failed , mbxCmd x%x "
8627 "READ_CONFIG, mbxStatus x%x\n",
8628 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8629 bf_get(lpfc_mqe_status, &pmb->u.mqe));
James Smartda0436e2009-05-22 14:51:39 -04008630 rc = -EIO;
8631 } else {
8632 rd_config = &pmb->u.mqe.un.rd_config;
James Smartff78d8f2011-12-13 13:21:35 -05008633 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8634 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8635 phba->sli4_hba.lnk_info.lnk_tp =
8636 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8637 phba->sli4_hba.lnk_info.lnk_no =
8638 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8639 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8640 "3081 lnk_type:%d, lnk_numb:%d\n",
8641 phba->sli4_hba.lnk_info.lnk_tp,
8642 phba->sli4_hba.lnk_info.lnk_no);
8643 } else
8644 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8645 "3082 Mailbox (x%x) returned ldv:x0\n",
8646 bf_get(lpfc_mqe_command, &pmb->u.mqe));
James Smart44fd7fe2017-08-23 16:55:47 -07008647 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8648 phba->bbcredit_support = 1;
8649 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8650 }
8651
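		/* Cache the trunking configuration and whether resource
		 * extents are in use, as reported in the READ_CONFIG response.
		 */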
James Smart1dc5ec22018-10-23 13:41:11 -07008652 phba->sli4_hba.conf_trunk =
8653 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
James Smart6d368e52011-05-24 11:44:12 -04008654 phba->sli4_hba.extents_in_use =
8655 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
James Smartda0436e2009-05-22 14:51:39 -04008656 phba->sli4_hba.max_cfg_param.max_xri =
8657 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
James Smart31f06d22019-08-14 16:56:31 -07008658 /* Reduce resource usage in kdump environment */
8659 if (is_kdump_kernel() &&
8660 phba->sli4_hba.max_cfg_param.max_xri > 512)
8661 phba->sli4_hba.max_cfg_param.max_xri = 512;
James Smartda0436e2009-05-22 14:51:39 -04008662 phba->sli4_hba.max_cfg_param.xri_base =
8663 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8664 phba->sli4_hba.max_cfg_param.max_vpi =
8665 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
James Smart8b47ae62018-11-29 16:09:33 -08008666 /* Limit the max we support */
8667 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8668 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
James Smartda0436e2009-05-22 14:51:39 -04008669 phba->sli4_hba.max_cfg_param.vpi_base =
8670 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8671 phba->sli4_hba.max_cfg_param.max_rpi =
8672 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8673 phba->sli4_hba.max_cfg_param.rpi_base =
8674 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8675 phba->sli4_hba.max_cfg_param.max_vfi =
8676 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8677 phba->sli4_hba.max_cfg_param.vfi_base =
8678 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8679 phba->sli4_hba.max_cfg_param.max_fcfi =
8680 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
James Smartda0436e2009-05-22 14:51:39 -04008681 phba->sli4_hba.max_cfg_param.max_eq =
8682 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8683 phba->sli4_hba.max_cfg_param.max_rq =
8684 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8685 phba->sli4_hba.max_cfg_param.max_wq =
8686 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8687 phba->sli4_hba.max_cfg_param.max_cq =
8688 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8689 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8690 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8691 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8692 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
James Smart5ffc2662009-11-18 15:39:44 -05008693 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8694 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
James Smartda0436e2009-05-22 14:51:39 -04008695 phba->max_vports = phba->max_vpi;
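		/* Translate the topology bits from READ_CONFIG into
		 * cfg_topology and the HBA_PERSISTENT_TOPO flag
		 * (see lpfc_map_topology above).
		 */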
James Smart83c6cb12019-10-18 14:18:30 -07008696 lpfc_map_topology(phba, rd_config);
James Smartda0436e2009-05-22 14:51:39 -04008697 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart6d368e52011-05-24 11:44:12 -04008698 "2003 cfg params Extents? %d "
8699 "XRI(B:%d M:%d), "
James Smartda0436e2009-05-22 14:51:39 -04008700 "VPI(B:%d M:%d) "
8701 "VFI(B:%d M:%d) "
8702 "RPI(B:%d M:%d) "
Dick Kennedya1e4d3d2020-08-03 14:02:22 -07008703 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
James Smart6d368e52011-05-24 11:44:12 -04008704 phba->sli4_hba.extents_in_use,
James Smartda0436e2009-05-22 14:51:39 -04008705 phba->sli4_hba.max_cfg_param.xri_base,
8706 phba->sli4_hba.max_cfg_param.max_xri,
8707 phba->sli4_hba.max_cfg_param.vpi_base,
8708 phba->sli4_hba.max_cfg_param.max_vpi,
8709 phba->sli4_hba.max_cfg_param.vfi_base,
8710 phba->sli4_hba.max_cfg_param.max_vfi,
8711 phba->sli4_hba.max_cfg_param.rpi_base,
8712 phba->sli4_hba.max_cfg_param.max_rpi,
James Smart2ea259e2017-02-12 13:52:27 -08008713 phba->sli4_hba.max_cfg_param.max_fcfi,
8714 phba->sli4_hba.max_cfg_param.max_eq,
8715 phba->sli4_hba.max_cfg_param.max_cq,
8716 phba->sli4_hba.max_cfg_param.max_wq,
Dick Kennedya1e4d3d2020-08-03 14:02:22 -07008717 phba->sli4_hba.max_cfg_param.max_rq,
8718 phba->lmt);
James Smart2ea259e2017-02-12 13:52:27 -08008719
James Smartd38f33b2018-05-04 20:37:54 -07008720 /*
James Smart6a828b02019-01-28 11:14:31 -08008721 * Calculate queue resources based on how
8722 * many WQ/CQ/EQs are available.
James Smartd38f33b2018-05-04 20:37:54 -07008723 */
James Smart6a828b02019-01-28 11:14:31 -08008724 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8725 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8726 qmin = phba->sli4_hba.max_cfg_param.max_cq;
8727 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8728 qmin = phba->sli4_hba.max_cfg_param.max_eq;
8729 /*
 8730 		 * What's left after this can go toward NVME / FCP.
8731 * The minus 4 accounts for ELS, NVME LS, MBOX
8732 * plus one extra. When configured for
8733 * NVMET, FCP io channel WQs are not created.
8734 */
8735 qmin -= 4;
James Smartd38f33b2018-05-04 20:37:54 -07008736
James Smart6a828b02019-01-28 11:14:31 -08008737 /* Check to see if there is enough for NVME */
8738 if ((phba->cfg_irq_chann > qmin) ||
8739 (phba->cfg_hdw_queue > qmin)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Dick Kennedy9e3e3652020-08-03 14:02:23 -07008741 "2005 Reducing Queues - "
8742 "FW resource limitation: "
James Smart6a828b02019-01-28 11:14:31 -08008743 "WQ %d CQ %d EQ %d: min %d: "
8744 "IRQ %d HDWQ %d\n",
James Smartd38f33b2018-05-04 20:37:54 -07008745 phba->sli4_hba.max_cfg_param.max_wq,
8746 phba->sli4_hba.max_cfg_param.max_cq,
James Smart6a828b02019-01-28 11:14:31 -08008747 phba->sli4_hba.max_cfg_param.max_eq,
8748 qmin, phba->cfg_irq_chann,
James Smartcdb42be2019-01-28 11:14:21 -08008749 phba->cfg_hdw_queue);
James Smartd38f33b2018-05-04 20:37:54 -07008750
James Smart6a828b02019-01-28 11:14:31 -08008751 if (phba->cfg_irq_chann > qmin)
8752 phba->cfg_irq_chann = qmin;
8753 if (phba->cfg_hdw_queue > qmin)
8754 phba->cfg_hdw_queue = qmin;
James Smartd38f33b2018-05-04 20:37:54 -07008755 }
James Smartda0436e2009-05-22 14:51:39 -04008756 }
James Smart912e3ac2011-05-24 11:42:11 -04008757
8758 if (rc)
8759 goto read_cfg_out;
James Smartda0436e2009-05-22 14:51:39 -04008760
James Smartc6918162016-10-13 15:06:16 -07008761 /* Update link speed if forced link speed is supported */
8762 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
James Smart27d6ac02018-02-22 08:18:42 -08008763 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
James Smartc6918162016-10-13 15:06:16 -07008764 forced_link_speed =
8765 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8766 if (forced_link_speed) {
8767 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8768
8769 switch (forced_link_speed) {
8770 case LINK_SPEED_1G:
8771 phba->cfg_link_speed =
8772 LPFC_USER_LINK_SPEED_1G;
8773 break;
8774 case LINK_SPEED_2G:
8775 phba->cfg_link_speed =
8776 LPFC_USER_LINK_SPEED_2G;
8777 break;
8778 case LINK_SPEED_4G:
8779 phba->cfg_link_speed =
8780 LPFC_USER_LINK_SPEED_4G;
8781 break;
8782 case LINK_SPEED_8G:
8783 phba->cfg_link_speed =
8784 LPFC_USER_LINK_SPEED_8G;
8785 break;
8786 case LINK_SPEED_10G:
8787 phba->cfg_link_speed =
8788 LPFC_USER_LINK_SPEED_10G;
8789 break;
8790 case LINK_SPEED_16G:
8791 phba->cfg_link_speed =
8792 LPFC_USER_LINK_SPEED_16G;
8793 break;
8794 case LINK_SPEED_32G:
8795 phba->cfg_link_speed =
8796 LPFC_USER_LINK_SPEED_32G;
8797 break;
James Smartfbd8a6b2018-02-22 08:18:45 -08008798 case LINK_SPEED_64G:
8799 phba->cfg_link_speed =
8800 LPFC_USER_LINK_SPEED_64G;
8801 break;
James Smartc6918162016-10-13 15:06:16 -07008802 case 0xffff:
8803 phba->cfg_link_speed =
8804 LPFC_USER_LINK_SPEED_AUTO;
8805 break;
8806 default:
Dick Kennedy372c1872020-06-30 14:50:00 -07008807 lpfc_printf_log(phba, KERN_ERR,
8808 LOG_TRACE_EVENT,
James Smartc6918162016-10-13 15:06:16 -07008809 "0047 Unrecognized link "
8810 "speed : %d\n",
8811 forced_link_speed);
8812 phba->cfg_link_speed =
8813 LPFC_USER_LINK_SPEED_AUTO;
8814 }
8815 }
8816 }
8817
James Smartda0436e2009-05-22 14:51:39 -04008818 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
James Smart572709e2013-07-15 18:32:43 -04008819 length = phba->sli4_hba.max_cfg_param.max_xri -
8820 lpfc_sli4_get_els_iocb_cnt(phba);
8821 if (phba->cfg_hba_queue_depth > length) {
8822 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8823 "3361 HBA queue depth changed from %d to %d\n",
8824 phba->cfg_hba_queue_depth, length);
8825 phba->cfg_hba_queue_depth = length;
8826 }
James Smart912e3ac2011-05-24 11:42:11 -04008827
James Smart27d6ac02018-02-22 08:18:42 -08008828 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
James Smart912e3ac2011-05-24 11:42:11 -04008829 LPFC_SLI_INTF_IF_TYPE_2)
8830 goto read_cfg_out;
8831
8832 /* get the pf# and vf# for SLI4 if_type 2 port */
8833 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8834 sizeof(struct lpfc_sli4_cfg_mhdr));
8835 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8836 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8837 length, LPFC_SLI4_MBX_EMBED);
8838
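	/* Issue GET_FUNCTION_CONFIG; check both the mailbox return code and
	 * the SLI4 config header status before using the descriptors.
	 */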
James Smart8aa134a2012-08-14 14:25:29 -04008839 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
James Smart912e3ac2011-05-24 11:42:11 -04008840 shdr = (union lpfc_sli4_cfg_shdr *)
8841 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8842 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8843 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
James Smart8aa134a2012-08-14 14:25:29 -04008844 if (rc2 || shdr_status || shdr_add_status) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008845 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart912e3ac2011-05-24 11:42:11 -04008846 			"3026 Mailbox failed, mbxCmd x%x "
8847 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8848 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8849 bf_get(lpfc_mqe_status, &pmb->u.mqe));
James Smart912e3ac2011-05-24 11:42:11 -04008850 goto read_cfg_out;
8851 }
8852
 8853 	/* search for fc_fcoe resource descriptor */
8854 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
James Smart912e3ac2011-05-24 11:42:11 -04008855
James Smart8aa134a2012-08-14 14:25:29 -04008856 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8857 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8858 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8859 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8860 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8861 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8862 goto read_cfg_out;
8863
James Smart912e3ac2011-05-24 11:42:11 -04008864 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
James Smart8aa134a2012-08-14 14:25:29 -04008865 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
James Smart912e3ac2011-05-24 11:42:11 -04008866 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
James Smart8aa134a2012-08-14 14:25:29 -04008867 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
James Smart912e3ac2011-05-24 11:42:11 -04008868 phba->sli4_hba.iov.pf_number =
8869 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8870 phba->sli4_hba.iov.vf_number =
8871 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8872 break;
8873 }
8874 }
8875
8876 if (i < LPFC_RSRC_DESC_MAX_NUM)
8877 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8878 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8879 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8880 phba->sli4_hba.iov.vf_number);
James Smart8aa134a2012-08-14 14:25:29 -04008881 else
Dick Kennedy372c1872020-06-30 14:50:00 -07008882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart912e3ac2011-05-24 11:42:11 -04008883 "3028 GET_FUNCTION_CONFIG: failed to find "
Colin Ian Kingc4dba182018-10-16 18:28:53 +01008884 "Resource Descriptor:x%x\n",
James Smart912e3ac2011-05-24 11:42:11 -04008885 LPFC_RSRC_DESC_TYPE_FCFCOE);
James Smart912e3ac2011-05-24 11:42:11 -04008886
8887read_cfg_out:
8888 mempool_free(pmb, phba->mbox_mem_pool);
James Smartda0436e2009-05-22 14:51:39 -04008889 return rc;
8890}
8891
8892/**
James Smart2fcee4b2010-12-15 17:57:46 -05008893 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
James Smartda0436e2009-05-22 14:51:39 -04008894 * @phba: pointer to lpfc hba data structure.
8895 *
James Smart2fcee4b2010-12-15 17:57:46 -05008896 * This routine is invoked to setup the port-side endian order when
8897 * the port if_type is 0. This routine has no function for other
8898 * if_types.
James Smartda0436e2009-05-22 14:51:39 -04008899 *
8900 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02008901 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008902 * -ENOMEM - No available memory
James Smartd439d282010-09-29 11:18:45 -04008903 * -EIO - The mailbox failed to complete successfully.
James Smartda0436e2009-05-22 14:51:39 -04008904 **/
8905static int
8906lpfc_setup_endian_order(struct lpfc_hba *phba)
8907{
8908 LPFC_MBOXQ_t *mboxq;
James Smart2fcee4b2010-12-15 17:57:46 -05008909 uint32_t if_type, rc = 0;
James Smartda0436e2009-05-22 14:51:39 -04008910 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8911 HOST_ENDIAN_HIGH_WORD1};
8912
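	/* Only if_type 0 ports need the SLI4_CONFIG_SPECIAL endian mailbox;
	 * for all other if_types this routine is a no-op and returns 0.
	 */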
James Smart2fcee4b2010-12-15 17:57:46 -05008913 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8914 switch (if_type) {
8915 case LPFC_SLI_INTF_IF_TYPE_0:
8916 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8917 GFP_KERNEL);
8918 if (!mboxq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008919 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -05008920 "0492 Unable to allocate memory for "
8921 "issuing SLI_CONFIG_SPECIAL mailbox "
8922 "command\n");
8923 return -ENOMEM;
8924 }
James Smartda0436e2009-05-22 14:51:39 -04008925
James Smart2fcee4b2010-12-15 17:57:46 -05008926 /*
8927 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8928 * two words to contain special data values and no other data.
8929 */
8930 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8931 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8932 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8933 if (rc != MBX_SUCCESS) {
Dick Kennedy372c1872020-06-30 14:50:00 -07008934 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -05008935 "0493 SLI_CONFIG_SPECIAL mailbox "
8936 "failed with status x%x\n",
8937 rc);
8938 rc = -EIO;
8939 }
8940 mempool_free(mboxq, phba->mbox_mem_pool);
8941 break;
James Smart27d6ac02018-02-22 08:18:42 -08008942 case LPFC_SLI_INTF_IF_TYPE_6:
James Smart2fcee4b2010-12-15 17:57:46 -05008943 case LPFC_SLI_INTF_IF_TYPE_2:
8944 case LPFC_SLI_INTF_IF_TYPE_1:
8945 default:
8946 break;
James Smartda0436e2009-05-22 14:51:39 -04008947 }
James Smartda0436e2009-05-22 14:51:39 -04008948 return rc;
8949}
8950
8951/**
James Smart895427b2017-02-12 13:52:30 -08008952 * lpfc_sli4_queue_verify - Verify and update EQ counts
James Smartda0436e2009-05-22 14:51:39 -04008953 * @phba: pointer to lpfc hba data structure.
8954 *
James Smart895427b2017-02-12 13:52:30 -08008955 * This routine is invoked to check the user-settable queue counts for EQs.
 8956 * After this routine is called, the counts will be set to valid values that
James Smart5350d872011-10-10 21:33:49 -04008957 * adhere to the constraints of the system's interrupt vectors and the port's
8958 * queue resources.
James Smartda0436e2009-05-22 14:51:39 -04008959 *
8960 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02008961 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008962 * -ENOMEM - No available memory
James Smartda0436e2009-05-22 14:51:39 -04008963 **/
8964static int
James Smart5350d872011-10-10 21:33:49 -04008965lpfc_sli4_queue_verify(struct lpfc_hba *phba)
James Smartda0436e2009-05-22 14:51:39 -04008966{
James Smartda0436e2009-05-22 14:51:39 -04008967 /*
James Smart67d12732012-08-03 12:36:13 -04008968 * Sanity check for configured queue parameters against the run-time
James Smartda0436e2009-05-22 14:51:39 -04008969 * device parameters
8970 */
8971
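	/* Clamp the NVMET MRQ count to the hardware queue count and the
	 * driver maximum before the queue entry sizes are set up.
	 */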
James Smartbcb24f62017-11-20 16:00:36 -08008972 if (phba->nvmet_support) {
James Smart97a9ed32019-10-18 14:18:17 -07008973 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8974 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
James Smart982ab122019-03-12 16:30:10 -07008975 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8976 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
James Smartbcb24f62017-11-20 16:00:36 -08008977 }
James Smart895427b2017-02-12 13:52:30 -08008978
8979 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart6a828b02019-01-28 11:14:31 -08008980 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8981 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8982 phba->cfg_nvmet_mrq);
James Smartda0436e2009-05-22 14:51:39 -04008983
James Smartda0436e2009-05-22 14:51:39 -04008984 /* Get EQ depth from module parameter, fake the default for now */
8985 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8986 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8987
James Smart5350d872011-10-10 21:33:49 -04008988 /* Get CQ depth from module parameter, fake the default for now */
8989 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8990 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
James Smart5350d872011-10-10 21:33:49 -04008991 return 0;
James Smart895427b2017-02-12 13:52:30 -08008992}
8993
8994static int
James Smartc00f62e2019-08-14 16:57:11 -07008995lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
James Smart895427b2017-02-12 13:52:30 -08008996{
8997 struct lpfc_queue *qdesc;
James Smartc00f62e2019-08-14 16:57:11 -07008998 u32 wqesize;
James Smartc1a21eb2019-03-12 16:30:29 -07008999 int cpu;
James Smart895427b2017-02-12 13:52:30 -08009000
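	/* Resolve the CPU tied to this hardware queue index; it is stored in
	 * the queue's chann field and passed to the queue allocator.
	 */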
James Smartc00f62e2019-08-14 16:57:11 -07009001 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
9002 /* Create Fast Path IO CQs */
James Smartc176ffa2018-01-30 15:58:46 -08009003 if (phba->enab_exp_wqcq_pages)
James Smarta51e41b2017-12-08 17:18:06 -08009004 /* Increase the CQ size when WQEs contain an embedded cdb */
9005 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
9006 phba->sli4_hba.cq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009007 LPFC_CQE_EXP_COUNT, cpu);
James Smarta51e41b2017-12-08 17:18:06 -08009008
9009 else
9010 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9011 phba->sli4_hba.cq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009012 phba->sli4_hba.cq_ecount, cpu);
James Smart895427b2017-02-12 13:52:30 -08009013 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9015 "0499 Failed allocate fast-path IO CQ (%d)\n",
9016 idx);
James Smart895427b2017-02-12 13:52:30 -08009017 return 1;
9018 }
James Smart7365f6f2018-02-22 08:18:46 -08009019 qdesc->qe_valid = 1;
James Smartc00f62e2019-08-14 16:57:11 -07009020 qdesc->hdwq = idx;
James Smartc1a21eb2019-03-12 16:30:29 -07009021 qdesc->chann = cpu;
James Smartc00f62e2019-08-14 16:57:11 -07009022 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
James Smart895427b2017-02-12 13:52:30 -08009023
James Smartc00f62e2019-08-14 16:57:11 -07009024 /* Create Fast Path IO WQs */
James Smartc176ffa2018-01-30 15:58:46 -08009025 if (phba->enab_exp_wqcq_pages) {
James Smarta51e41b2017-12-08 17:18:06 -08009026 /* Increase the WQ size when WQEs contain an embedded cdb */
James Smartc176ffa2018-01-30 15:58:46 -08009027 wqesize = (phba->fcp_embed_io) ?
9028 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
James Smarta51e41b2017-12-08 17:18:06 -08009029 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
James Smartc176ffa2018-01-30 15:58:46 -08009030 wqesize,
James Smartc1a21eb2019-03-12 16:30:29 -07009031 LPFC_WQE_EXP_COUNT, cpu);
James Smartc176ffa2018-01-30 15:58:46 -08009032 } else
James Smarta51e41b2017-12-08 17:18:06 -08009033 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9034 phba->sli4_hba.wq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009035 phba->sli4_hba.wq_ecount, cpu);
James Smartc176ffa2018-01-30 15:58:46 -08009036
James Smart895427b2017-02-12 13:52:30 -08009037 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009038 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartc00f62e2019-08-14 16:57:11 -07009039 "0503 Failed allocate fast-path IO WQ (%d)\n",
9040 idx);
James Smart895427b2017-02-12 13:52:30 -08009041 return 1;
9042 }
James Smartc00f62e2019-08-14 16:57:11 -07009043 qdesc->hdwq = idx;
9044 qdesc->chann = cpu;
9045 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
James Smart895427b2017-02-12 13:52:30 -08009046 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9047 return 0;
James Smart5350d872011-10-10 21:33:49 -04009048}
9049
9050/**
9051 * lpfc_sli4_queue_create - Create all the SLI4 queues
9052 * @phba: pointer to lpfc hba data structure.
9053 *
9054 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
9055 * operation. For each SLI4 queue type, the parameters such as queue entry
9056 * count (queue depth) shall be taken from the module parameter. For now,
9057 * we just use some constant number as place holder.
9058 *
9059 * Return codes
Anatol Pomozov4907cb72012-09-01 10:31:09 -07009060 * 0 - successful
James Smart5350d872011-10-10 21:33:49 -04009061 * -ENOMEM - No available memory
9062 * -EIO - The mailbox failed to complete successfully.
9063 **/
9064int
9065lpfc_sli4_queue_create(struct lpfc_hba *phba)
9066{
9067 struct lpfc_queue *qdesc;
James Smart657add42019-05-21 17:49:06 -07009068 int idx, cpu, eqcpu;
James Smart5e5b5112019-01-28 11:14:22 -08009069 struct lpfc_sli4_hdw_queue *qp;
James Smart657add42019-05-21 17:49:06 -07009070 struct lpfc_vector_map_info *cpup;
9071 struct lpfc_vector_map_info *eqcpup;
James Smart32517fc2019-01-28 11:14:33 -08009072 struct lpfc_eq_intr_info *eqi;
James Smart5350d872011-10-10 21:33:49 -04009073
9074 /*
James Smart67d12732012-08-03 12:36:13 -04009075 * Create HBA Record arrays.
James Smart895427b2017-02-12 13:52:30 -08009076 * Both NVME and FCP will share that same vectors / EQs
James Smart5350d872011-10-10 21:33:49 -04009077 */
James Smart67d12732012-08-03 12:36:13 -04009078 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
9079 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
9080 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
9081 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
9082 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
9083 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
James Smart895427b2017-02-12 13:52:30 -08009084 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
9085 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
9086 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
9087 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
James Smart67d12732012-08-03 12:36:13 -04009088
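	/* Allocate the hardware queue array on first use and give each entry
	 * its buffer lists and locks before any queues are created.
	 */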
James Smartcdb42be2019-01-28 11:14:21 -08009089 if (!phba->sli4_hba.hdwq) {
James Smart5e5b5112019-01-28 11:14:22 -08009090 phba->sli4_hba.hdwq = kcalloc(
9091 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
9092 GFP_KERNEL);
9093 if (!phba->sli4_hba.hdwq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009094 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart5e5b5112019-01-28 11:14:22 -08009095 "6427 Failed allocate memory for "
9096 "fast-path Hardware Queue array\n");
James Smart895427b2017-02-12 13:52:30 -08009097 goto out_error;
9098 }
James Smart5e5b5112019-01-28 11:14:22 -08009099 /* Prepare hardware queues to take IO buffers */
9100 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9101 qp = &phba->sli4_hba.hdwq[idx];
9102 spin_lock_init(&qp->io_buf_list_get_lock);
9103 spin_lock_init(&qp->io_buf_list_put_lock);
9104 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
9105 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
9106 qp->get_io_bufs = 0;
9107 qp->put_io_bufs = 0;
9108 qp->total_io_bufs = 0;
James Smartc00f62e2019-08-14 16:57:11 -07009109 spin_lock_init(&qp->abts_io_buf_list_lock);
9110 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
James Smart5e5b5112019-01-28 11:14:22 -08009111 qp->abts_scsi_io_bufs = 0;
James Smart5e5b5112019-01-28 11:14:22 -08009112 qp->abts_nvme_io_bufs = 0;
James Smartd79c9e92019-08-14 16:57:09 -07009113 INIT_LIST_HEAD(&qp->sgl_list);
9114 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
9115 spin_lock_init(&qp->hdwq_lock);
James Smart895427b2017-02-12 13:52:30 -08009116 }
James Smart67d12732012-08-03 12:36:13 -04009117 }
9118
James Smartcdb42be2019-01-28 11:14:21 -08009119 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
James Smart2d7dbc42017-02-12 13:52:35 -08009120 if (phba->nvmet_support) {
9121 phba->sli4_hba.nvmet_cqset = kcalloc(
9122 phba->cfg_nvmet_mrq,
9123 sizeof(struct lpfc_queue *),
9124 GFP_KERNEL);
9125 if (!phba->sli4_hba.nvmet_cqset) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009127 "3121 Fail allocate memory for "
9128 "fast-path CQ set array\n");
9129 goto out_error;
9130 }
9131 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
9132 phba->cfg_nvmet_mrq,
9133 sizeof(struct lpfc_queue *),
9134 GFP_KERNEL);
9135 if (!phba->sli4_hba.nvmet_mrq_hdr) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009137 "3122 Fail allocate memory for "
9138 "fast-path RQ set hdr array\n");
9139 goto out_error;
9140 }
9141 phba->sli4_hba.nvmet_mrq_data = kcalloc(
9142 phba->cfg_nvmet_mrq,
9143 sizeof(struct lpfc_queue *),
9144 GFP_KERNEL);
9145 if (!phba->sli4_hba.nvmet_mrq_data) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009146 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009147 "3124 Fail allocate memory for "
9148 "fast-path RQ set data array\n");
9149 goto out_error;
9150 }
9151 }
James Smart67d12732012-08-03 12:36:13 -04009152 }
James Smartda0436e2009-05-22 14:51:39 -04009153
James Smart895427b2017-02-12 13:52:30 -08009154 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
James Smart67d12732012-08-03 12:36:13 -04009155
James Smart895427b2017-02-12 13:52:30 -08009156 /* Create HBA Event Queues (EQs) */
James Smart657add42019-05-21 17:49:06 -07009157 for_each_present_cpu(cpu) {
9158 /* We only want to create 1 EQ per vector, even though
 9159 		 * multiple CPUs might be using that vector, so only
 9160 		 * select the CPUs that have LPFC_CPU_FIRST_IRQ set.
James Smart6a828b02019-01-28 11:14:31 -08009161 */
James Smart657add42019-05-21 17:49:06 -07009162 cpup = &phba->sli4_hba.cpu_map[cpu];
9163 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
James Smart6a828b02019-01-28 11:14:31 -08009164 continue;
James Smart657add42019-05-21 17:49:06 -07009165
9166 /* Get a ptr to the Hardware Queue associated with this CPU */
9167 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9168
9169 /* Allocate an EQ */
James Smart81b96ed2017-11-20 16:00:29 -08009170 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9171 phba->sli4_hba.eq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009172 phba->sli4_hba.eq_ecount, cpu);
James Smartda0436e2009-05-22 14:51:39 -04009173 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009174 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart657add42019-05-21 17:49:06 -07009175 "0497 Failed allocate EQ (%d)\n",
9176 cpup->hdwq);
James Smart67d12732012-08-03 12:36:13 -04009177 goto out_error;
James Smartda0436e2009-05-22 14:51:39 -04009178 }
James Smart7365f6f2018-02-22 08:18:46 -08009179 qdesc->qe_valid = 1;
James Smart657add42019-05-21 17:49:06 -07009180 qdesc->hdwq = cpup->hdwq;
James Smart3ad348d2019-08-14 16:56:43 -07009181 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
James Smart32517fc2019-01-28 11:14:33 -08009182 qdesc->last_cpu = qdesc->chann;
James Smart657add42019-05-21 17:49:06 -07009183
9184 /* Save the allocated EQ in the Hardware Queue */
9185 qp->hba_eq = qdesc;
9186
James Smart32517fc2019-01-28 11:14:33 -08009187 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
9188 list_add(&qdesc->cpu_list, &eqi->list);
James Smartda0436e2009-05-22 14:51:39 -04009189 }
9190
James Smart657add42019-05-21 17:49:06 -07009191 	/* Now we need to populate the other Hardware Queues, which share
9192 * an IRQ vector, with the associated EQ ptr.
9193 */
9194 for_each_present_cpu(cpu) {
9195 cpup = &phba->sli4_hba.cpu_map[cpu];
9196
9197 /* Check for EQ already allocated in previous loop */
9198 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
9199 continue;
9200
9201 /* Check for multiple CPUs per hdwq */
9202 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9203 if (qp->hba_eq)
9204 continue;
9205
9206 /* We need to share an EQ for this hdwq */
9207 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
9208 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
9209 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
9210 }
James Smart895427b2017-02-12 13:52:30 -08009211
James Smartc00f62e2019-08-14 16:57:11 -07009212 /* Allocate IO Path SLI4 CQ/WQs */
James Smart6a828b02019-01-28 11:14:31 -08009213 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
James Smartc00f62e2019-08-14 16:57:11 -07009214 if (lpfc_alloc_io_wq_cq(phba, idx))
James Smart895427b2017-02-12 13:52:30 -08009215 goto out_error;
James Smart6a828b02019-01-28 11:14:31 -08009216 }
James Smart895427b2017-02-12 13:52:30 -08009217
James Smartc00f62e2019-08-14 16:57:11 -07009218 if (phba->nvmet_support) {
9219 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9220 cpu = lpfc_find_cpu_handle(phba, idx,
9221 LPFC_FIND_BY_HDWQ);
9222 qdesc = lpfc_sli4_queue_alloc(phba,
James Smart81b96ed2017-11-20 16:00:29 -08009223 LPFC_DEFAULT_PAGE_SIZE,
9224 phba->sli4_hba.cq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009225 phba->sli4_hba.cq_ecount,
9226 cpu);
James Smartc00f62e2019-08-14 16:57:11 -07009227 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009228 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartcdb42be2019-01-28 11:14:21 -08009229 "3142 Failed allocate NVME "
9230 "CQ Set (%d)\n", idx);
James Smartc00f62e2019-08-14 16:57:11 -07009231 goto out_error;
James Smart2d7dbc42017-02-12 13:52:35 -08009232 }
James Smartc00f62e2019-08-14 16:57:11 -07009233 qdesc->qe_valid = 1;
9234 qdesc->hdwq = idx;
9235 qdesc->chann = cpu;
9236 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
James Smart2d7dbc42017-02-12 13:52:35 -08009237 }
9238 }
9239
James Smartda0436e2009-05-22 14:51:39 -04009240 /*
James Smart67d12732012-08-03 12:36:13 -04009241 * Create Slow Path Completion Queues (CQs)
James Smartda0436e2009-05-22 14:51:39 -04009242 */
9243
James Smartc1a21eb2019-03-12 16:30:29 -07009244 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
James Smartda0436e2009-05-22 14:51:39 -04009245 /* Create slow-path Mailbox Command Complete Queue */
James Smart81b96ed2017-11-20 16:00:29 -08009246 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9247 phba->sli4_hba.cq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009248 phba->sli4_hba.cq_ecount, cpu);
James Smartda0436e2009-05-22 14:51:39 -04009249 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009251 "0500 Failed allocate slow-path mailbox CQ\n");
James Smart67d12732012-08-03 12:36:13 -04009252 goto out_error;
James Smartda0436e2009-05-22 14:51:39 -04009253 }
James Smart7365f6f2018-02-22 08:18:46 -08009254 qdesc->qe_valid = 1;
James Smartda0436e2009-05-22 14:51:39 -04009255 phba->sli4_hba.mbx_cq = qdesc;
9256
9257 /* Create slow-path ELS Complete Queue */
James Smart81b96ed2017-11-20 16:00:29 -08009258 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9259 phba->sli4_hba.cq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009260 phba->sli4_hba.cq_ecount, cpu);
James Smartda0436e2009-05-22 14:51:39 -04009261 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009263 "0501 Failed allocate slow-path ELS CQ\n");
James Smart67d12732012-08-03 12:36:13 -04009264 goto out_error;
James Smartda0436e2009-05-22 14:51:39 -04009265 }
James Smart7365f6f2018-02-22 08:18:46 -08009266 qdesc->qe_valid = 1;
James Smartc00f62e2019-08-14 16:57:11 -07009267 qdesc->chann = cpu;
James Smartda0436e2009-05-22 14:51:39 -04009268 phba->sli4_hba.els_cq = qdesc;
9269
James Smartda0436e2009-05-22 14:51:39 -04009270
James Smart5350d872011-10-10 21:33:49 -04009271 /*
James Smart67d12732012-08-03 12:36:13 -04009272 * Create Slow Path Work Queues (WQs)
James Smart5350d872011-10-10 21:33:49 -04009273 */
James Smartda0436e2009-05-22 14:51:39 -04009274
9275 /* Create Mailbox Command Queue */
James Smartda0436e2009-05-22 14:51:39 -04009276
James Smart81b96ed2017-11-20 16:00:29 -08009277 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9278 phba->sli4_hba.mq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009279 phba->sli4_hba.mq_ecount, cpu);
James Smartda0436e2009-05-22 14:51:39 -04009280 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009282 "0505 Failed allocate slow-path MQ\n");
James Smart67d12732012-08-03 12:36:13 -04009283 goto out_error;
James Smartda0436e2009-05-22 14:51:39 -04009284 }
James Smartc00f62e2019-08-14 16:57:11 -07009285 qdesc->chann = cpu;
James Smartda0436e2009-05-22 14:51:39 -04009286 phba->sli4_hba.mbx_wq = qdesc;
9287
9288 /*
James Smart67d12732012-08-03 12:36:13 -04009289 * Create ELS Work Queues
James Smartda0436e2009-05-22 14:51:39 -04009290 */
James Smartda0436e2009-05-22 14:51:39 -04009291
9292 /* Create slow-path ELS Work Queue */
James Smart81b96ed2017-11-20 16:00:29 -08009293 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9294 phba->sli4_hba.wq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009295 phba->sli4_hba.wq_ecount, cpu);
James Smartda0436e2009-05-22 14:51:39 -04009296 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009298 "0504 Failed allocate slow-path ELS WQ\n");
James Smart67d12732012-08-03 12:36:13 -04009299 goto out_error;
James Smartda0436e2009-05-22 14:51:39 -04009300 }
James Smartc00f62e2019-08-14 16:57:11 -07009301 qdesc->chann = cpu;
James Smartda0436e2009-05-22 14:51:39 -04009302 phba->sli4_hba.els_wq = qdesc;
James Smart895427b2017-02-12 13:52:30 -08009303 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9304
9305 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9306 /* Create NVME LS Complete Queue */
James Smart81b96ed2017-11-20 16:00:29 -08009307 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9308 phba->sli4_hba.cq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009309 phba->sli4_hba.cq_ecount, cpu);
James Smart895427b2017-02-12 13:52:30 -08009310 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009311 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009312 "6079 Failed allocate NVME LS CQ\n");
9313 goto out_error;
9314 }
James Smartc00f62e2019-08-14 16:57:11 -07009315 qdesc->chann = cpu;
James Smart7365f6f2018-02-22 08:18:46 -08009316 qdesc->qe_valid = 1;
James Smart895427b2017-02-12 13:52:30 -08009317 phba->sli4_hba.nvmels_cq = qdesc;
9318
9319 /* Create NVME LS Work Queue */
James Smart81b96ed2017-11-20 16:00:29 -08009320 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9321 phba->sli4_hba.wq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009322 phba->sli4_hba.wq_ecount, cpu);
James Smart895427b2017-02-12 13:52:30 -08009323 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009325 "6080 Failed allocate NVME LS WQ\n");
9326 goto out_error;
9327 }
James Smartc00f62e2019-08-14 16:57:11 -07009328 qdesc->chann = cpu;
James Smart895427b2017-02-12 13:52:30 -08009329 phba->sli4_hba.nvmels_wq = qdesc;
9330 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9331 }
James Smartda0436e2009-05-22 14:51:39 -04009332
James Smartda0436e2009-05-22 14:51:39 -04009333 /*
9334 * Create Receive Queue (RQ)
9335 */
James Smartda0436e2009-05-22 14:51:39 -04009336
9337 /* Create Receive Queue for header */
James Smart81b96ed2017-11-20 16:00:29 -08009338 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9339 phba->sli4_hba.rq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009340 phba->sli4_hba.rq_ecount, cpu);
James Smartda0436e2009-05-22 14:51:39 -04009341 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009342 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009343 "0506 Failed allocate receive HRQ\n");
James Smart67d12732012-08-03 12:36:13 -04009344 goto out_error;
James Smartda0436e2009-05-22 14:51:39 -04009345 }
9346 phba->sli4_hba.hdr_rq = qdesc;
9347
9348 /* Create Receive Queue for data */
James Smart81b96ed2017-11-20 16:00:29 -08009349 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9350 phba->sli4_hba.rq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009351 phba->sli4_hba.rq_ecount, cpu);
James Smartda0436e2009-05-22 14:51:39 -04009352 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009353 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009354 "0507 Failed allocate receive DRQ\n");
James Smart67d12732012-08-03 12:36:13 -04009355 goto out_error;
James Smartda0436e2009-05-22 14:51:39 -04009356 }
9357 phba->sli4_hba.dat_rq = qdesc;
9358
James Smartcdb42be2019-01-28 11:14:21 -08009359 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9360 phba->nvmet_support) {
James Smart2d7dbc42017-02-12 13:52:35 -08009361 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
James Smartc1a21eb2019-03-12 16:30:29 -07009362 cpu = lpfc_find_cpu_handle(phba, idx,
9363 LPFC_FIND_BY_HDWQ);
James Smart2d7dbc42017-02-12 13:52:35 -08009364 /* Create NVMET Receive Queue for header */
9365 qdesc = lpfc_sli4_queue_alloc(phba,
James Smart81b96ed2017-11-20 16:00:29 -08009366 LPFC_DEFAULT_PAGE_SIZE,
James Smart2d7dbc42017-02-12 13:52:35 -08009367 phba->sli4_hba.rq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009368 LPFC_NVMET_RQE_DEF_COUNT,
9369 cpu);
James Smart2d7dbc42017-02-12 13:52:35 -08009370 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009371 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009372 "3146 Failed allocate "
9373 "receive HRQ\n");
9374 goto out_error;
9375 }
James Smart5e5b5112019-01-28 11:14:22 -08009376 qdesc->hdwq = idx;
James Smart2d7dbc42017-02-12 13:52:35 -08009377 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9378
9379 /* Only needed for header of RQ pair */
James Smartc1a21eb2019-03-12 16:30:29 -07009380 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9381 GFP_KERNEL,
9382 cpu_to_node(cpu));
James Smart2d7dbc42017-02-12 13:52:35 -08009383 if (qdesc->rqbp == NULL) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009385 "6131 Failed allocate "
9386 "Header RQBP\n");
9387 goto out_error;
9388 }
9389
Dick Kennedy4b40d022017-08-23 16:55:38 -07009390 /* Put list in known state in case driver load fails. */
9391 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9392
James Smart2d7dbc42017-02-12 13:52:35 -08009393 /* Create NVMET Receive Queue for data */
9394 qdesc = lpfc_sli4_queue_alloc(phba,
James Smart81b96ed2017-11-20 16:00:29 -08009395 LPFC_DEFAULT_PAGE_SIZE,
James Smart2d7dbc42017-02-12 13:52:35 -08009396 phba->sli4_hba.rq_esize,
James Smartc1a21eb2019-03-12 16:30:29 -07009397 LPFC_NVMET_RQE_DEF_COUNT,
9398 cpu);
James Smart2d7dbc42017-02-12 13:52:35 -08009399 if (!qdesc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009401 "3156 Failed allocate "
9402 "receive DRQ\n");
9403 goto out_error;
9404 }
James Smart5e5b5112019-01-28 11:14:22 -08009405 qdesc->hdwq = idx;
James Smart2d7dbc42017-02-12 13:52:35 -08009406 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9407 }
9408 }
9409
James Smart4c47efc2019-01-28 11:14:25 -08009410 /* Clear NVME stats */
9411 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9412 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9413 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9414 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9415 }
9416 }
James Smart4c47efc2019-01-28 11:14:25 -08009417
9418 /* Clear SCSI stats */
9419 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9420 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9421 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9422 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9423 }
9424 }
9425
James Smartda0436e2009-05-22 14:51:39 -04009426 return 0;
9427
James Smartda0436e2009-05-22 14:51:39 -04009428out_error:
James Smart67d12732012-08-03 12:36:13 -04009429 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -04009430 return -ENOMEM;
9431}
9432
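/* Helpers to free a single queue (clearing the caller's pointer) and an
 * array of queue pointers, freeing the array itself as well.
 */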
James Smart895427b2017-02-12 13:52:30 -08009433static inline void
9434__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9435{
9436 if (*qp != NULL) {
9437 lpfc_sli4_queue_free(*qp);
9438 *qp = NULL;
9439 }
9440}
9441
9442static inline void
9443lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9444{
9445 int idx;
9446
9447 if (*qs == NULL)
9448 return;
9449
9450 for (idx = 0; idx < max; idx++)
9451 __lpfc_sli4_release_queue(&(*qs)[idx]);
9452
9453 kfree(*qs);
9454 *qs = NULL;
9455}
9456
9457static inline void
James Smart6a828b02019-01-28 11:14:31 -08009458lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
James Smart895427b2017-02-12 13:52:30 -08009459{
James Smart6a828b02019-01-28 11:14:31 -08009460 struct lpfc_sli4_hdw_queue *hdwq;
James Smart657add42019-05-21 17:49:06 -07009461 struct lpfc_queue *eq;
James Smartcdb42be2019-01-28 11:14:21 -08009462 uint32_t idx;
9463
James Smart6a828b02019-01-28 11:14:31 -08009464 hdwq = phba->sli4_hba.hdwq;
James Smart6a828b02019-01-28 11:14:31 -08009465
James Smart657add42019-05-21 17:49:06 -07009466 /* Loop thru all Hardware Queues */
9467 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9468 /* Free the CQ/WQ corresponding to the Hardware Queue */
James Smartc00f62e2019-08-14 16:57:11 -07009469 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9470 lpfc_sli4_queue_free(hdwq[idx].io_wq);
James Smart821bc882020-01-27 16:23:05 -08009471 hdwq[idx].hba_eq = NULL;
James Smartc00f62e2019-08-14 16:57:11 -07009472 hdwq[idx].io_cq = NULL;
9473 hdwq[idx].io_wq = NULL;
James Smartd79c9e92019-08-14 16:57:09 -07009474 if (phba->cfg_xpsgl && !phba->nvmet_support)
9475 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9476 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
James Smart895427b2017-02-12 13:52:30 -08009477 }
James Smart657add42019-05-21 17:49:06 -07009478 /* Loop thru all IRQ vectors */
9479 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9480 /* Free the EQ corresponding to the IRQ vector */
9481 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9482 lpfc_sli4_queue_free(eq);
9483 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9484 }
James Smart895427b2017-02-12 13:52:30 -08009485}
9486
James Smartda0436e2009-05-22 14:51:39 -04009487/**
9488 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9489 * @phba: pointer to lpfc hba data structure.
9490 *
9491 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
9492 * operation.
9493 *
9494 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02009495 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009496 * -ENOMEM - No available memory
James Smartd439d282010-09-29 11:18:45 -04009497 * -EIO - The mailbox failed to complete successfully.
James Smartda0436e2009-05-22 14:51:39 -04009498 **/
James Smart5350d872011-10-10 21:33:49 -04009499void
James Smartda0436e2009-05-22 14:51:39 -04009500lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9501{
James Smart4645f7b2019-03-12 16:30:14 -07009502 /*
9503 * Set FREE_INIT before beginning to free the queues.
9504 * Wait until the users of queues to acknowledge to
9505 * release queues by clearing FREE_WAIT.
9506 */
9507 spin_lock_irq(&phba->hbalock);
9508 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9509 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9510 spin_unlock_irq(&phba->hbalock);
9511 msleep(20);
9512 spin_lock_irq(&phba->hbalock);
9513 }
9514 spin_unlock_irq(&phba->hbalock);
9515
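	/* Remove any queues still on the poll list before they are freed */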
James Smart93a4d6f2019-11-04 16:57:05 -08009516 lpfc_sli4_cleanup_poll_list(phba);
9517
James Smart895427b2017-02-12 13:52:30 -08009518 /* Release HBA eqs */
James Smartcdb42be2019-01-28 11:14:21 -08009519 if (phba->sli4_hba.hdwq)
James Smart6a828b02019-01-28 11:14:31 -08009520 lpfc_sli4_release_hdwq(phba);
James Smartda0436e2009-05-22 14:51:39 -04009521
James Smartbcb24f62017-11-20 16:00:36 -08009522 if (phba->nvmet_support) {
9523 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9524 phba->cfg_nvmet_mrq);
James Smart2d7dbc42017-02-12 13:52:35 -08009525
James Smartbcb24f62017-11-20 16:00:36 -08009526 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9527 phba->cfg_nvmet_mrq);
9528 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9529 phba->cfg_nvmet_mrq);
9530 }
James Smart2d7dbc42017-02-12 13:52:35 -08009531
James Smartda0436e2009-05-22 14:51:39 -04009532 /* Release mailbox command work queue */
James Smart895427b2017-02-12 13:52:30 -08009533 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
James Smartda0436e2009-05-22 14:51:39 -04009534
9535 /* Release ELS work queue */
James Smart895427b2017-02-12 13:52:30 -08009536 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9537
9538 /* Release ELS work queue */
9539 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
James Smartda0436e2009-05-22 14:51:39 -04009540
9541 /* Release unsolicited receive queue */
James Smart895427b2017-02-12 13:52:30 -08009542 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9543 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
James Smartda0436e2009-05-22 14:51:39 -04009544
James Smartda0436e2009-05-22 14:51:39 -04009545 /* Release ELS complete queue */
James Smart895427b2017-02-12 13:52:30 -08009546 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9547
9548 /* Release NVME LS complete queue */
9549 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
James Smartda0436e2009-05-22 14:51:39 -04009550
9551 /* Release mailbox command complete queue */
James Smart895427b2017-02-12 13:52:30 -08009552 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9553
9554 /* Everything on this list has been freed */
9555 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
James Smart4645f7b2019-03-12 16:30:14 -07009556
9557 /* Done with freeing the queues */
9558 spin_lock_irq(&phba->hbalock);
9559 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9560 spin_unlock_irq(&phba->hbalock);
James Smart895427b2017-02-12 13:52:30 -08009561}
9562
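/* Walk a receive queue's posted buffer list and return each buffer through
 * the queue's rqb_free_buffer callback.
 */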
9563int
James Smart895427b2017-02-12 13:52:30 -08009564lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9565{
9566 struct lpfc_rqb *rqbp;
9567 struct lpfc_dmabuf *h_buf;
9568 struct rqb_dmabuf *rqb_buffer;
9569
9570 rqbp = rq->rqbp;
9571 while (!list_empty(&rqbp->rqb_buffer_list)) {
9572 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9573 struct lpfc_dmabuf, list);
9574
9575 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9576 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9577 rqbp->buffer_count--;
9578 }
9579 return 1;
9580}
9581
9582static int
9583lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9584 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9585 int qidx, uint32_t qtype)
9586{
9587 struct lpfc_sli_ring *pring;
9588 int rc;
9589
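	/* The caller supplies pre-allocated queues: the CQ is created on the
	 * EQ first, then either a WQ (fast path/ELS) or the MQ (LPFC_MBOX)
	 * is created on that CQ.
	 */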
9590 if (!eq || !cq || !wq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009591 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009592 "6085 Fast-path %s (%d) not allocated\n",
9593 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9594 return -ENOMEM;
James Smart67d12732012-08-03 12:36:13 -04009595 }
James Smartda0436e2009-05-22 14:51:39 -04009596
James Smart895427b2017-02-12 13:52:30 -08009597 /* create the Cq first */
9598 rc = lpfc_cq_create(phba, cq, eq,
9599 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9600 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009601 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9602 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9603 qidx, (uint32_t)rc);
James Smart895427b2017-02-12 13:52:30 -08009604 return rc;
9605 }
9606
9607 if (qtype != LPFC_MBOX) {
James Smartcdb42be2019-01-28 11:14:21 -08009608 /* Setup cq_map for fast lookup */
James Smart895427b2017-02-12 13:52:30 -08009609 if (cq_map)
9610 *cq_map = cq->queue_id;
9611
9612 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9613 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9614 qidx, cq->queue_id, qidx, eq->queue_id);
9615
9616 /* create the wq */
9617 rc = lpfc_wq_create(phba, wq, cq, qtype);
9618 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartc835c082019-03-12 16:30:30 -07009620 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
James Smart895427b2017-02-12 13:52:30 -08009621 qidx, (uint32_t)rc);
9622 /* no need to tear down cq - caller will do so */
9623 return rc;
9624 }
9625
9626 /* Bind this CQ/WQ to the NVME ring */
9627 pring = wq->pring;
9628 pring->sli.sli4.wqp = (void *)wq;
9629 cq->pring = pring;
9630
9631 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9632 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9633 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9634 } else {
9635 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9636 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9638 "0539 Failed setup of slow-path MQ: "
9639 "rc = 0x%x\n", rc);
James Smart895427b2017-02-12 13:52:30 -08009640 /* no need to tear down cq - caller will do so */
9641 return rc;
9642 }
9643
9644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9645 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9646 phba->sli4_hba.mbx_wq->queue_id,
9647 phba->sli4_hba.mbx_cq->queue_id);
9648 }
9649
9650 return 0;
James Smartda0436e2009-05-22 14:51:39 -04009651}
9652
9653/**
James Smart6a828b02019-01-28 11:14:31 -08009654 * lpfc_setup_cq_lookup - Setup the CQ lookup table
9655 * @phba: pointer to lpfc hba data structure.
9656 *
9657 * This routine will populate the cq_lookup table by all
9658 * available CQ queue_id's.
9659 **/
Bart Van Assche3999df72019-03-28 11:06:16 -07009660static void
James Smart6a828b02019-01-28 11:14:31 -08009661lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9662{
9663 struct lpfc_queue *eq, *childq;
James Smart6a828b02019-01-28 11:14:31 -08009664 int qidx;
9665
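	/* Rebuild the table from scratch: index by CQ queue_id (up to cq_max)
	 * and point each IO completion queue entry back at its struct.
	 */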
James Smart6a828b02019-01-28 11:14:31 -08009666 memset(phba->sli4_hba.cq_lookup, 0,
9667 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
James Smart657add42019-05-21 17:49:06 -07009668 /* Loop thru all IRQ vectors */
James Smart6a828b02019-01-28 11:14:31 -08009669 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
James Smart657add42019-05-21 17:49:06 -07009670 /* Get the EQ corresponding to the IRQ vector */
9671 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
James Smart6a828b02019-01-28 11:14:31 -08009672 if (!eq)
9673 continue;
James Smart657add42019-05-21 17:49:06 -07009674 /* Loop through all CQs associated with that EQ */
James Smart6a828b02019-01-28 11:14:31 -08009675 list_for_each_entry(childq, &eq->child_list, list) {
9676 if (childq->queue_id > phba->sli4_hba.cq_max)
9677 continue;
James Smartc00f62e2019-08-14 16:57:11 -07009678 if (childq->subtype == LPFC_IO)
James Smart6a828b02019-01-28 11:14:31 -08009679 phba->sli4_hba.cq_lookup[childq->queue_id] =
9680 childq;
9681 }
9682 }
9683}
9684
9685/**
James Smartda0436e2009-05-22 14:51:39 -04009686 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9687 * @phba: pointer to lpfc hba data structure.
9688 *
9689 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9690 * operation.
9691 *
9692 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02009693 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009694 * -ENOMEM - No available memory
James Smartd439d282010-09-29 11:18:45 -04009695 * -EIO - The mailbox failed to complete successfully.
James Smartda0436e2009-05-22 14:51:39 -04009696 **/
9697int
9698lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9699{
James Smart962bc512013-01-03 15:44:00 -05009700 uint32_t shdr_status, shdr_add_status;
9701 union lpfc_sli4_cfg_shdr *shdr;
James Smart657add42019-05-21 17:49:06 -07009702 struct lpfc_vector_map_info *cpup;
James Smartcdb42be2019-01-28 11:14:21 -08009703 struct lpfc_sli4_hdw_queue *qp;
James Smart962bc512013-01-03 15:44:00 -05009704 LPFC_MBOXQ_t *mboxq;
James Smart657add42019-05-21 17:49:06 -07009705 int qidx, cpu;
James Smartcb733e32019-01-28 11:14:32 -08009706 uint32_t length, usdelay;
James Smart895427b2017-02-12 13:52:30 -08009707 int rc = -ENOMEM;
James Smart962bc512013-01-03 15:44:00 -05009708
9709 /* Check for dual-ULP support */
9710 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9711 if (!mboxq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009712 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart962bc512013-01-03 15:44:00 -05009713 "3249 Unable to allocate memory for "
9714 "QUERY_FW_CFG mailbox command\n");
9715 return -ENOMEM;
9716 }
9717 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9718 sizeof(struct lpfc_sli4_cfg_mhdr));
9719 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9720 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9721 length, LPFC_SLI4_MBX_EMBED);
9722
9723 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9724
9725 shdr = (union lpfc_sli4_cfg_shdr *)
9726 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9727 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9728 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9729 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009730 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart962bc512013-01-03 15:44:00 -05009731 "3250 QUERY_FW_CFG mailbox failed with status "
9732 "x%x add_status x%x, mbx status x%x\n",
9733 shdr_status, shdr_add_status, rc);
James Smart304ee432021-04-11 18:31:17 -07009734 mempool_free(mboxq, phba->mbox_mem_pool);
James Smart962bc512013-01-03 15:44:00 -05009735 rc = -ENXIO;
9736 goto out_error;
9737 }
9738
9739 phba->sli4_hba.fw_func_mode =
9740 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9741 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9742 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
James Smart8b017a32015-05-21 13:55:18 -04009743 phba->sli4_hba.physical_port =
9744 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
James Smart962bc512013-01-03 15:44:00 -05009745 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9746 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9747 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9748 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9749
James Smart304ee432021-04-11 18:31:17 -07009750 mempool_free(mboxq, phba->mbox_mem_pool);
James Smartda0436e2009-05-22 14:51:39 -04009751
9752 /*
James Smart67d12732012-08-03 12:36:13 -04009753 * Set up HBA Event Queues (EQs)
James Smartda0436e2009-05-22 14:51:39 -04009754 */
James Smartcdb42be2019-01-28 11:14:21 -08009755 qp = phba->sli4_hba.hdwq;
James Smartda0436e2009-05-22 14:51:39 -04009756
James Smart67d12732012-08-03 12:36:13 -04009757 /* Set up HBA event queue */
James Smartcdb42be2019-01-28 11:14:21 -08009758 if (!qp) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009759 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2e90f4b2011-12-13 13:22:37 -05009760 "3147 Fast-path EQs not allocated\n");
James Smart1b511972011-12-13 13:23:09 -05009761 rc = -ENOMEM;
James Smart67d12732012-08-03 12:36:13 -04009762 goto out_error;
James Smart2e90f4b2011-12-13 13:22:37 -05009763 }
James Smart657add42019-05-21 17:49:06 -07009764
9765 /* Loop thru all IRQ vectors */
James Smart6a828b02019-01-28 11:14:31 -08009766 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
James Smart657add42019-05-21 17:49:06 -07009767 /* Create HBA Event Queues (EQs) in order */
9768 for_each_present_cpu(cpu) {
9769 cpup = &phba->sli4_hba.cpu_map[cpu];
9770
 9771 			/* Look for the CPU that's using that vector with
9772 * LPFC_CPU_FIRST_IRQ set.
9773 */
9774 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9775 continue;
9776 if (qidx != cpup->eq)
9777 continue;
9778
9779 /* Create an EQ for that vector */
9780 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9781 phba->cfg_fcp_imax);
9782 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart657add42019-05-21 17:49:06 -07009784 "0523 Failed setup of fast-path"
9785 " EQ (%d), rc = 0x%x\n",
9786 cpup->eq, (uint32_t)rc);
9787 goto out_destroy;
9788 }
9789
9790 /* Save the EQ for that vector in the hba_eq_hdl */
9791 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9792 qp[cpup->hdwq].hba_eq;
9793
9794 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9795 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9796 cpup->eq,
9797 qp[cpup->hdwq].hba_eq->queue_id);
James Smartda0436e2009-05-22 14:51:39 -04009798 }
James Smartda0436e2009-05-22 14:51:39 -04009799 }
9800
James Smart657add42019-05-21 17:49:06 -07009801 /* Loop thru all Hardware Queues */
James Smartcdb42be2019-01-28 11:14:21 -08009802 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
James Smart657add42019-05-21 17:49:06 -07009803 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9804 cpup = &phba->sli4_hba.cpu_map[cpu];
9805
9806 /* Create the CQ/WQ corresponding to the Hardware Queue */
James Smartcdb42be2019-01-28 11:14:21 -08009807 rc = lpfc_create_wq_cq(phba,
James Smart657add42019-05-21 17:49:06 -07009808 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
James Smartc00f62e2019-08-14 16:57:11 -07009809 qp[qidx].io_cq,
9810 qp[qidx].io_wq,
9811 &phba->sli4_hba.hdwq[qidx].io_cq_map,
9812 qidx,
9813 LPFC_IO);
James Smartcdb42be2019-01-28 11:14:21 -08009814 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009815 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009816 "0535 Failed to setup fastpath "
James Smartc00f62e2019-08-14 16:57:11 -07009817 "IO WQ/CQ (%d), rc = 0x%x\n",
James Smart895427b2017-02-12 13:52:30 -08009818 qidx, (uint32_t)rc);
James Smartcdb42be2019-01-28 11:14:21 -08009819 goto out_destroy;
James Smart895427b2017-02-12 13:52:30 -08009820 }
James Smart67d12732012-08-03 12:36:13 -04009821 }
James Smartda0436e2009-05-22 14:51:39 -04009822
9823 /*
James Smart895427b2017-02-12 13:52:30 -08009824 * Set up Slow Path Complete Queues (CQs)
James Smartda0436e2009-05-22 14:51:39 -04009825 */
9826
James Smart895427b2017-02-12 13:52:30 -08009827 /* Set up slow-path MBOX CQ/MQ */
9828
9829 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009831 "0528 %s not allocated\n",
9832 phba->sli4_hba.mbx_cq ?
James Smartd1f525a2017-04-21 16:04:55 -07009833 "Mailbox WQ" : "Mailbox CQ");
James Smart1b511972011-12-13 13:23:09 -05009834 rc = -ENOMEM;
James Smart895427b2017-02-12 13:52:30 -08009835 goto out_destroy;
James Smartda0436e2009-05-22 14:51:39 -04009836 }
James Smart895427b2017-02-12 13:52:30 -08009837
James Smartcdb42be2019-01-28 11:14:21 -08009838 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
James Smartd1f525a2017-04-21 16:04:55 -07009839 phba->sli4_hba.mbx_cq,
9840 phba->sli4_hba.mbx_wq,
9841 NULL, 0, LPFC_MBOX);
James Smartda0436e2009-05-22 14:51:39 -04009842 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009843 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009844 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9845 (uint32_t)rc);
9846 goto out_destroy;
James Smartda0436e2009-05-22 14:51:39 -04009847 }
James Smart2d7dbc42017-02-12 13:52:35 -08009848 if (phba->nvmet_support) {
9849 if (!phba->sli4_hba.nvmet_cqset) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009851 "3165 Fast-path NVME CQ Set "
9852 "array not allocated\n");
9853 rc = -ENOMEM;
9854 goto out_destroy;
9855 }
9856 if (phba->cfg_nvmet_mrq > 1) {
9857 rc = lpfc_cq_create_set(phba,
9858 phba->sli4_hba.nvmet_cqset,
James Smartcdb42be2019-01-28 11:14:21 -08009859 qp,
James Smart2d7dbc42017-02-12 13:52:35 -08009860 LPFC_WCQ, LPFC_NVMET);
9861 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009862 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009863 "3164 Failed setup of NVME CQ "
9864 "Set, rc = 0x%x\n",
9865 (uint32_t)rc);
9866 goto out_destroy;
9867 }
9868 } else {
9869 /* Set up NVMET Receive Complete Queue */
9870 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
James Smartcdb42be2019-01-28 11:14:21 -08009871 qp[0].hba_eq,
James Smart2d7dbc42017-02-12 13:52:35 -08009872 LPFC_WCQ, LPFC_NVMET);
9873 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009874 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009875 "6089 Failed setup NVMET CQ: "
9876 "rc = 0x%x\n", (uint32_t)rc);
9877 goto out_destroy;
9878 }
James Smart81b96ed2017-11-20 16:00:29 -08009879 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9880
James Smart2d7dbc42017-02-12 13:52:35 -08009881 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9882 "6090 NVMET CQ setup: cq-id=%d, "
9883 "parent eq-id=%d\n",
9884 phba->sli4_hba.nvmet_cqset[0]->queue_id,
James Smartcdb42be2019-01-28 11:14:21 -08009885 qp[0].hba_eq->queue_id);
James Smart2d7dbc42017-02-12 13:52:35 -08009886 }
9887 }
James Smartda0436e2009-05-22 14:51:39 -04009888
James Smart895427b2017-02-12 13:52:30 -08009889 /* Set up slow-path ELS WQ/CQ */
9890 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009892 "0530 ELS %s not allocated\n",
9893 phba->sli4_hba.els_cq ? "WQ" : "CQ");
James Smart1b511972011-12-13 13:23:09 -05009894 rc = -ENOMEM;
James Smart895427b2017-02-12 13:52:30 -08009895 goto out_destroy;
James Smartda0436e2009-05-22 14:51:39 -04009896 }
James Smartcdb42be2019-01-28 11:14:21 -08009897 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9898 phba->sli4_hba.els_cq,
9899 phba->sli4_hba.els_wq,
9900 NULL, 0, LPFC_ELS);
James Smartda0436e2009-05-22 14:51:39 -04009901 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009902 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartcdb42be2019-01-28 11:14:21 -08009903 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9904 (uint32_t)rc);
James Smart895427b2017-02-12 13:52:30 -08009905 goto out_destroy;
James Smartda0436e2009-05-22 14:51:39 -04009906 }
9907 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9908 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9909 phba->sli4_hba.els_wq->queue_id,
9910 phba->sli4_hba.els_cq->queue_id);
9911
James Smartcdb42be2019-01-28 11:14:21 -08009912 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
James Smart895427b2017-02-12 13:52:30 -08009913 /* Set up NVME LS Complete Queue */
9914 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart895427b2017-02-12 13:52:30 -08009916 "6091 LS %s not allocated\n",
9917 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9918 rc = -ENOMEM;
9919 goto out_destroy;
9920 }
James Smartcdb42be2019-01-28 11:14:21 -08009921 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9922 phba->sli4_hba.nvmels_cq,
9923 phba->sli4_hba.nvmels_wq,
9924 NULL, 0, LPFC_NVME_LS);
James Smart895427b2017-02-12 13:52:30 -08009925 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009926 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartcdb42be2019-01-28 11:14:21 -08009927 				"0526 Failed setup of NVME LS WQ/CQ: "
9928 "rc = 0x%x\n", (uint32_t)rc);
James Smart895427b2017-02-12 13:52:30 -08009929 goto out_destroy;
9930 }
9931
9932 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9933 "6096 ELS WQ setup: wq-id=%d, "
9934 "parent cq-id=%d\n",
9935 phba->sli4_hba.nvmels_wq->queue_id,
9936 phba->sli4_hba.nvmels_cq->queue_id);
9937 }
9938
James Smart2d7dbc42017-02-12 13:52:35 -08009939 /*
9940 * Create NVMET Receive Queue (RQ)
9941 */
9942 if (phba->nvmet_support) {
9943 if ((!phba->sli4_hba.nvmet_cqset) ||
9944 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9945 (!phba->sli4_hba.nvmet_mrq_data)) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009947 "6130 MRQ CQ Queues not "
9948 "allocated\n");
9949 rc = -ENOMEM;
9950 goto out_destroy;
9951 }
9952 if (phba->cfg_nvmet_mrq > 1) {
9953 rc = lpfc_mrq_create(phba,
9954 phba->sli4_hba.nvmet_mrq_hdr,
9955 phba->sli4_hba.nvmet_mrq_data,
9956 phba->sli4_hba.nvmet_cqset,
9957 LPFC_NVMET);
9958 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009959 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009960 "6098 Failed setup of NVMET "
9961 "MRQ: rc = 0x%x\n",
9962 (uint32_t)rc);
9963 goto out_destroy;
9964 }
9965
9966 } else {
9967 rc = lpfc_rq_create(phba,
9968 phba->sli4_hba.nvmet_mrq_hdr[0],
9969 phba->sli4_hba.nvmet_mrq_data[0],
9970 phba->sli4_hba.nvmet_cqset[0],
9971 LPFC_NVMET);
9972 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2d7dbc42017-02-12 13:52:35 -08009974 "6057 Failed setup of NVMET "
9975 "Receive Queue: rc = 0x%x\n",
9976 (uint32_t)rc);
9977 goto out_destroy;
9978 }
9979
9980 lpfc_printf_log(
9981 phba, KERN_INFO, LOG_INIT,
9982 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9983 "dat-rq-id=%d parent cq-id=%d\n",
9984 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9985 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9986 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9987
9988 }
9989 }
9990
James Smartda0436e2009-05-22 14:51:39 -04009991 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
Dick Kennedy372c1872020-06-30 14:50:00 -07009992 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -04009993 "0540 Receive Queue not allocated\n");
James Smart1b511972011-12-13 13:23:09 -05009994 rc = -ENOMEM;
James Smart895427b2017-02-12 13:52:30 -08009995 goto out_destroy;
James Smartda0436e2009-05-22 14:51:39 -04009996 }
James Smart73d91e52011-10-10 21:32:10 -04009997
James Smartda0436e2009-05-22 14:51:39 -04009998 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
James Smart4d9ab992009-10-02 15:16:39 -04009999 phba->sli4_hba.els_cq, LPFC_USOL);
James Smartda0436e2009-05-22 14:51:39 -040010000 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070010001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -040010002 "0541 Failed setup of Receive Queue: "
James Smarta2fc4aef2014-09-03 12:57:55 -040010003 "rc = 0x%x\n", (uint32_t)rc);
James Smart895427b2017-02-12 13:52:30 -080010004 goto out_destroy;
James Smartda0436e2009-05-22 14:51:39 -040010005 }
James Smart73d91e52011-10-10 21:32:10 -040010006
James Smartda0436e2009-05-22 14:51:39 -040010007 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10008 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
10009 "parent cq-id=%d\n",
10010 phba->sli4_hba.hdr_rq->queue_id,
10011 phba->sli4_hba.dat_rq->queue_id,
James Smart4d9ab992009-10-02 15:16:39 -040010012 phba->sli4_hba.els_cq->queue_id);
James Smart1ba981f2014-02-20 09:56:45 -050010013
James Smartcb733e32019-01-28 11:14:32 -080010014 if (phba->cfg_fcp_imax)
10015 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
10016 else
10017 usdelay = 0;
10018
James Smart6a828b02019-01-28 11:14:31 -080010019 for (qidx = 0; qidx < phba->cfg_irq_chann;
James Smartcdb42be2019-01-28 11:14:21 -080010020 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
James Smart0cf07f842017-06-01 21:07:10 -070010021 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
James Smartcb733e32019-01-28 11:14:32 -080010022 usdelay);
James Smart43140ca2017-03-04 09:30:34 -080010023
James Smart6a828b02019-01-28 11:14:31 -080010024 if (phba->sli4_hba.cq_max) {
10025 kfree(phba->sli4_hba.cq_lookup);
10026 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
10027 sizeof(struct lpfc_queue *), GFP_KERNEL);
10028 if (!phba->sli4_hba.cq_lookup) {
Dick Kennedy372c1872020-06-30 14:50:00 -070010029 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart6a828b02019-01-28 11:14:31 -080010030 "0549 Failed setup of CQ Lookup table: "
10031 "size 0x%x\n", phba->sli4_hba.cq_max);
Dan Carpenterfad28e32019-02-11 21:43:00 +030010032 rc = -ENOMEM;
James Smart1ba981f2014-02-20 09:56:45 -050010033 goto out_destroy;
James Smart895427b2017-02-12 13:52:30 -080010034 }
James Smart6a828b02019-01-28 11:14:31 -080010035 lpfc_setup_cq_lookup(phba);
James Smart1ba981f2014-02-20 09:56:45 -050010036 }
James Smartda0436e2009-05-22 14:51:39 -040010037 return 0;
10038
James Smart895427b2017-02-12 13:52:30 -080010039out_destroy:
10040 lpfc_sli4_queue_unset(phba);
James Smartda0436e2009-05-22 14:51:39 -040010041out_error:
10042 return rc;
10043}
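/*
 * Editor's note (illustrative summary, not part of the driver source): the
 * queue setup routine above (presumably lpfc_sli4_queue_setup()) builds the
 * whole SLI4 queue topology in one pass -- one EQ per IRQ vector
 * (cfg_irq_chann), one io CQ/WQ pair per hardware queue (cfg_hdw_queue), the
 * slow-path MBX and ELS CQ/WQ pairs, the NVME LS CQ/WQ pair, the optional
 * NVMET CQ set and MRQs, and the unsolicited hdr/dat RQ pair, finishing with
 * EQ delay programming and the cq_lookup table.  A caller (the SLI4 HBA
 * bring-up path) is expected to pair it with lpfc_sli4_queue_unset() on
 * teardown, roughly:
 *
 *	rc = lpfc_sli4_queue_setup(phba);
 *	if (rc)
 *		return rc;                (on error the routine already tore
 *	                                   down via lpfc_sli4_queue_unset)
 *	...
 *	lpfc_sli4_queue_unset(phba);      (on shutdown or reset)
 */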
10044
10045/**
10046 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
10047 * @phba: pointer to lpfc hba data structure.
10048 *
10050 * This routine is invoked to unset (destroy) all the SLI4 queues that were
10051 * set up for the FCoE HBA operation.
10051 *
10052 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020010053 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010054 * -ENOMEM - No available memory
James Smartd439d282010-09-29 11:18:45 -040010055 * -EIO - The mailbox failed to complete successfully.
James Smartda0436e2009-05-22 14:51:39 -040010056 **/
10057void
10058lpfc_sli4_queue_unset(struct lpfc_hba *phba)
10059{
James Smartcdb42be2019-01-28 11:14:21 -080010060 struct lpfc_sli4_hdw_queue *qp;
James Smart657add42019-05-21 17:49:06 -070010061 struct lpfc_queue *eq;
James Smart895427b2017-02-12 13:52:30 -080010062 int qidx;
James Smartda0436e2009-05-22 14:51:39 -040010063
10064 /* Unset mailbox command work queue */
James Smart895427b2017-02-12 13:52:30 -080010065 if (phba->sli4_hba.mbx_wq)
10066 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
10067
10068 /* Unset NVME LS work queue */
10069 if (phba->sli4_hba.nvmels_wq)
10070 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
10071
James Smartda0436e2009-05-22 14:51:39 -040010072 /* Unset ELS work queue */
Colin Ian King019c0d62017-05-06 23:13:55 +010010073 if (phba->sli4_hba.els_wq)
James Smart895427b2017-02-12 13:52:30 -080010074 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
10075
James Smartda0436e2009-05-22 14:51:39 -040010076 /* Unset unsolicited receive queue */
James Smart895427b2017-02-12 13:52:30 -080010077 if (phba->sli4_hba.hdr_rq)
10078 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
10079 phba->sli4_hba.dat_rq);
10080
James Smartda0436e2009-05-22 14:51:39 -040010081 /* Unset mailbox command complete queue */
James Smart895427b2017-02-12 13:52:30 -080010082 if (phba->sli4_hba.mbx_cq)
10083 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
10084
James Smartda0436e2009-05-22 14:51:39 -040010085 /* Unset ELS complete queue */
James Smart895427b2017-02-12 13:52:30 -080010086 if (phba->sli4_hba.els_cq)
10087 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
10088
10089 /* Unset NVME LS complete queue */
10090 if (phba->sli4_hba.nvmels_cq)
10091 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
10092
James Smartbcb24f62017-11-20 16:00:36 -080010093 if (phba->nvmet_support) {
10094 /* Unset NVMET MRQ queue */
10095 if (phba->sli4_hba.nvmet_mrq_hdr) {
10096 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10097 lpfc_rq_destroy(
10098 phba,
James Smart2d7dbc42017-02-12 13:52:35 -080010099 phba->sli4_hba.nvmet_mrq_hdr[qidx],
10100 phba->sli4_hba.nvmet_mrq_data[qidx]);
James Smartbcb24f62017-11-20 16:00:36 -080010101 }
James Smart2d7dbc42017-02-12 13:52:35 -080010102
James Smartbcb24f62017-11-20 16:00:36 -080010103 /* Unset NVMET CQ Set complete queue */
10104 if (phba->sli4_hba.nvmet_cqset) {
10105 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10106 lpfc_cq_destroy(
10107 phba, phba->sli4_hba.nvmet_cqset[qidx]);
10108 }
James Smart2d7dbc42017-02-12 13:52:35 -080010109 }
10110
James Smartcdb42be2019-01-28 11:14:21 -080010111 /* Unset fast-path SLI4 queues */
10112 if (phba->sli4_hba.hdwq) {
James Smart657add42019-05-21 17:49:06 -070010113 /* Loop thru all Hardware Queues */
James Smartcdb42be2019-01-28 11:14:21 -080010114 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
James Smart657add42019-05-21 17:49:06 -070010115 /* Destroy the CQ/WQ corresponding to Hardware Queue */
James Smartcdb42be2019-01-28 11:14:21 -080010116 qp = &phba->sli4_hba.hdwq[qidx];
James Smartc00f62e2019-08-14 16:57:11 -070010117 lpfc_wq_destroy(phba, qp->io_wq);
10118 lpfc_cq_destroy(phba, qp->io_cq);
James Smart657add42019-05-21 17:49:06 -070010119 }
10120 /* Loop thru all IRQ vectors */
10121 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10122 /* Destroy the EQ corresponding to the IRQ vector */
10123 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10124 lpfc_eq_destroy(phba, eq);
James Smartcdb42be2019-01-28 11:14:21 -080010125 }
10126 }
James Smart895427b2017-02-12 13:52:30 -080010127
James Smart6a828b02019-01-28 11:14:31 -080010128 kfree(phba->sli4_hba.cq_lookup);
10129 phba->sli4_hba.cq_lookup = NULL;
10130 phba->sli4_hba.cq_max = 0;
James Smartda0436e2009-05-22 14:51:39 -040010131}
10132
10133/**
10134 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
10135 * @phba: pointer to lpfc hba data structure.
10136 *
10137 * This routine is invoked to allocate and set up a pool of completion queue
10138 * events. The body of the completion queue event is a completion queue entry
10139 * CQE. For now, this pool is used for the interrupt service routine to queue
10140 * the following HBA completion queue events for the worker thread to process:
10141 * - Mailbox asynchronous events
10142 * - Receive queue completion unsolicited events
10143 * Later, this can be used for all the slow-path events.
10144 *
10145 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020010146 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010147 * -ENOMEM - No available memory
James Smartda0436e2009-05-22 14:51:39 -040010148 **/
10149static int
10150lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
10151{
10152 struct lpfc_cq_event *cq_event;
10153 int i;
10154
10155 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
10156 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
10157 if (!cq_event)
10158 goto out_pool_create_fail;
10159 list_add_tail(&cq_event->list,
10160 &phba->sli4_hba.sp_cqe_event_pool);
10161 }
10162 return 0;
10163
10164out_pool_create_fail:
10165 lpfc_sli4_cq_event_pool_destroy(phba);
10166 return -ENOMEM;
10167}
10168
10169/**
10170 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
10171 * @phba: pointer to lpfc hba data structure.
10172 *
10173 * This routine is invoked to free the pool of completion queue events at
10174 * driver unload time. Note that, it is the responsibility of the driver
10175 * cleanup routine to free all the outstanding completion-queue events
10176 * allocated from this pool back into the pool before invoking this routine
10177 * to destroy the pool.
10178 **/
10179static void
10180lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
10181{
10182 struct lpfc_cq_event *cq_event, *next_cq_event;
10183
10184 list_for_each_entry_safe(cq_event, next_cq_event,
10185 &phba->sli4_hba.sp_cqe_event_pool, list) {
10186 list_del(&cq_event->list);
10187 kfree(cq_event);
10188 }
10189}
10190
10191/**
10192 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10193 * @phba: pointer to lpfc hba data structure.
10194 *
10195 * This routine is the lock free version of the API invoked to allocate a
10196 * completion-queue event from the free pool.
10197 *
10198 * Return: Pointer to the newly allocated completion-queue event if successful
10199 * NULL otherwise.
10200 **/
10201struct lpfc_cq_event *
10202__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10203{
10204 struct lpfc_cq_event *cq_event = NULL;
10205
10206 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
10207 struct lpfc_cq_event, list);
10208 return cq_event;
10209}
10210
10211/**
10212 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10213 * @phba: pointer to lpfc hba data structure.
10214 *
10215 * This routine is the lock version of the API invoked to allocate a
10216 * completion-queue event from the free pool.
10217 *
10218 * Return: Pointer to the newly allocated completion-queue event if successful
10219 * NULL otherwise.
10220 **/
10221struct lpfc_cq_event *
10222lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10223{
10224 struct lpfc_cq_event *cq_event;
10225 unsigned long iflags;
10226
10227 spin_lock_irqsave(&phba->hbalock, iflags);
10228 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10229 spin_unlock_irqrestore(&phba->hbalock, iflags);
10230 return cq_event;
10231}
10232
10233/**
10234 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10235 * @phba: pointer to lpfc hba data structure.
10236 * @cq_event: pointer to the completion queue event to be freed.
10237 *
10238 * This routine is the lock free version of the API invoked to release a
10239 * completion-queue event back into the free pool.
10240 **/
10241void
10242__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10243 struct lpfc_cq_event *cq_event)
10244{
10245 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10246}
10247
10248/**
10249 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10250 * @phba: pointer to lpfc hba data structure.
10251 * @cq_event: pointer to the completion queue event to be freed.
10252 *
10253 * This routine is the lock version of the API invoked to release a
10254 * completion-queue event back into the free pool.
10255 **/
10256void
10257lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10258 struct lpfc_cq_event *cq_event)
10259{
10260 unsigned long iflags;
10261 spin_lock_irqsave(&phba->hbalock, iflags);
10262 __lpfc_sli4_cq_event_release(phba, cq_event);
10263 spin_unlock_irqrestore(&phba->hbalock, iflags);
10264}
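/*
 * Editor's note -- minimal usage sketch for the cq_event pool above (an
 * illustration only; the producer-side details are paraphrased, not quoted):
 *
 *	struct lpfc_cq_event *cq_event;
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);    (ISR side, takes hbalock)
 *	if (!cq_event)
 *		return;                               (pool exhausted)
 *	... copy the CQE or async-event payload into cq_event ...
 *	... queue it on a slow-path work list for the worker thread ...
 *
 * The worker thread, once it has processed the entry, returns it with
 * lpfc_sli4_cq_event_release(phba, cq_event), which simply puts it back on
 * sp_cqe_event_pool under hbalock.
 */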
10265
10266/**
10267 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10268 * @phba: pointer to lpfc hba data structure.
10269 *
10270 * This routine frees all the pending completion-queue events back into the
10271 * free pool for device reset.
10272 **/
10273static void
10274lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10275{
James Smarte7dab162020-10-20 13:27:12 -070010276 LIST_HEAD(cq_event_list);
10277 struct lpfc_cq_event *cq_event;
James Smartda0436e2009-05-22 14:51:39 -040010278 unsigned long iflags;
10279
10280 /* Retrieve all the pending WCQEs from pending WCQE lists */
James Smartda0436e2009-05-22 14:51:39 -040010281
James Smarte7dab162020-10-20 13:27:12 -070010282 /* Pending ELS XRI abort events */
10283 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10284 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10285 &cq_event_list);
10286 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10287
10288	/* Pending async events */
10289 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
10290 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10291 &cq_event_list);
10292 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
10293
10294 while (!list_empty(&cq_event_list)) {
10295 list_remove_head(&cq_event_list, cq_event,
10296 struct lpfc_cq_event, list);
10297 lpfc_sli4_cq_event_release(phba, cq_event);
James Smartda0436e2009-05-22 14:51:39 -040010298 }
10299}
10300
10301/**
10302 * lpfc_pci_function_reset - Reset pci function.
10303 * @phba: pointer to lpfc hba data structure.
10304 *
10305 * This routine is invoked to request a PCI function reset. It destroys all
10306 * resources assigned to the PCI function which originates this request.
10307 *
10308 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020010309 * 0 - successful
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010310 * -ENOMEM - No available memory
James Smartd439d282010-09-29 11:18:45 -040010311 * -EIO - The mailbox failed to complete successfully.
James Smartda0436e2009-05-22 14:51:39 -040010312 **/
10313int
10314lpfc_pci_function_reset(struct lpfc_hba *phba)
10315{
10316 LPFC_MBOXQ_t *mboxq;
James Smart2fcee4b2010-12-15 17:57:46 -050010317 uint32_t rc = 0, if_type;
James Smartda0436e2009-05-22 14:51:39 -040010318 uint32_t shdr_status, shdr_add_status;
James Smart2f6fa2c2014-09-03 12:57:08 -040010319 uint32_t rdy_chk;
10320 uint32_t port_reset = 0;
James Smartda0436e2009-05-22 14:51:39 -040010321 union lpfc_sli4_cfg_shdr *shdr;
James Smart2fcee4b2010-12-15 17:57:46 -050010322 struct lpfc_register reg_data;
James Smart2b81f942012-03-01 22:37:18 -050010323 uint16_t devid;
James Smartda0436e2009-05-22 14:51:39 -040010324
James Smart2fcee4b2010-12-15 17:57:46 -050010325 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10326 switch (if_type) {
10327 case LPFC_SLI_INTF_IF_TYPE_0:
10328 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10329 GFP_KERNEL);
10330 if (!mboxq) {
Dick Kennedy372c1872020-06-30 14:50:00 -070010331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -050010332 "0494 Unable to allocate memory for "
10333 "issuing SLI_FUNCTION_RESET mailbox "
10334 "command\n");
10335 return -ENOMEM;
10336 }
10337
10338 /* Setup PCI function reset mailbox-ioctl command */
10339 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10340 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10341 LPFC_SLI4_MBX_EMBED);
10342 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10343 shdr = (union lpfc_sli4_cfg_shdr *)
10344 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10345 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10346 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10347 &shdr->response);
James Smart304ee432021-04-11 18:31:17 -070010348 mempool_free(mboxq, phba->mbox_mem_pool);
James Smart2fcee4b2010-12-15 17:57:46 -050010349 if (shdr_status || shdr_add_status || rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070010350 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -050010351 "0495 SLI_FUNCTION_RESET mailbox "
10352 "failed with status x%x add_status x%x,"
10353 " mbx status x%x\n",
10354 shdr_status, shdr_add_status, rc);
10355 rc = -ENXIO;
10356 }
10357 break;
10358 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart27d6ac02018-02-22 08:18:42 -080010359 case LPFC_SLI_INTF_IF_TYPE_6:
James Smart2f6fa2c2014-09-03 12:57:08 -040010360wait:
10361 /*
10362 * Poll the Port Status Register and wait for RDY for
10363 * up to 30 seconds. If the port doesn't respond, treat
10364 * it as an error.
10365 */
James Smart77d093f2015-04-07 15:07:08 -040010366 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
James Smart2f6fa2c2014-09-03 12:57:08 -040010367 if (lpfc_readl(phba->sli4_hba.u.if_type2.
10368 STATUSregaddr, &reg_data.word0)) {
10369 rc = -ENODEV;
10370 goto out;
10371 }
10372 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10373 break;
10374 msleep(20);
10375 }
10376
10377 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10378 phba->work_status[0] = readl(
10379 phba->sli4_hba.u.if_type2.ERR1regaddr);
10380 phba->work_status[1] = readl(
10381 phba->sli4_hba.u.if_type2.ERR2regaddr);
Dick Kennedy372c1872020-06-30 14:50:00 -070010382 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2f6fa2c2014-09-03 12:57:08 -040010383 "2890 Port not ready, port status reg "
10384 "0x%x error 1=0x%x, error 2=0x%x\n",
10385 reg_data.word0,
10386 phba->work_status[0],
10387 phba->work_status[1]);
10388 rc = -ENODEV;
10389 goto out;
10390 }
10391
10392 if (!port_reset) {
10393 /*
10394 * Reset the port now
10395 */
James Smart2fcee4b2010-12-15 17:57:46 -050010396 reg_data.word0 = 0;
10397 bf_set(lpfc_sliport_ctrl_end, &reg_data,
10398 LPFC_SLIPORT_LITTLE_ENDIAN);
10399 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10400 LPFC_SLIPORT_INIT_PORT);
10401 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10402 CTRLregaddr);
James Smart8fcb8ac2012-03-01 22:35:58 -050010403 /* flush */
James Smart2b81f942012-03-01 22:37:18 -050010404 pci_read_config_word(phba->pcidev,
10405 PCI_DEVICE_ID, &devid);
James Smart2fcee4b2010-12-15 17:57:46 -050010406
James Smart2f6fa2c2014-09-03 12:57:08 -040010407 port_reset = 1;
10408 msleep(20);
10409 goto wait;
10410 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10411 rc = -ENODEV;
10412 goto out;
James Smart2fcee4b2010-12-15 17:57:46 -050010413 }
10414 break;
James Smart2f6fa2c2014-09-03 12:57:08 -040010415
James Smart2fcee4b2010-12-15 17:57:46 -050010416 case LPFC_SLI_INTF_IF_TYPE_1:
10417 default:
10418 break;
James Smartda0436e2009-05-22 14:51:39 -040010419 }
10420
James Smart73d91e52011-10-10 21:32:10 -040010421out:
James Smart2fcee4b2010-12-15 17:57:46 -050010422 /* Catch the not-ready port failure after a port reset. */
James Smart2f6fa2c2014-09-03 12:57:08 -040010423 if (rc) {
Dick Kennedy372c1872020-06-30 14:50:00 -070010424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart229adb02013-04-17 20:16:51 -040010425 "3317 HBA not functional: IP Reset Failed "
James Smart2f6fa2c2014-09-03 12:57:08 -040010426 "try: echo fw_reset > board_mode\n");
James Smart2fcee4b2010-12-15 17:57:46 -050010427 rc = -ENODEV;
James Smart229adb02013-04-17 20:16:51 -040010428 }
James Smart2fcee4b2010-12-15 17:57:46 -050010429
James Smartda0436e2009-05-22 14:51:39 -040010430 return rc;
10431}
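/*
 * Editor's note: the "echo fw_reset > board_mode" hint in message 3317 above
 * refers to the driver's board_mode sysfs attribute.  Assuming the usual
 * placement of lpfc SCSI-host attributes, the recovery step from user space
 * would look roughly like:
 *
 *	echo fw_reset > /sys/class/scsi_host/hostN/board_mode
 *
 * where hostN is the SCSI host associated with this HBA.
 */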
10432
10433/**
James Smartda0436e2009-05-22 14:51:39 -040010434 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10435 * @phba: pointer to lpfc hba data structure.
10436 *
10437 * This routine is invoked to set up the PCI device memory space for device
10438 * with SLI-4 interface spec.
10439 *
10440 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020010441 * 0 - successful
James Smartda0436e2009-05-22 14:51:39 -040010442 * other values - error
10443 **/
10444static int
10445lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10446{
Christoph Hellwigf30e1bf2018-10-18 15:10:21 +020010447 struct pci_dev *pdev = phba->pcidev;
James Smartda0436e2009-05-22 14:51:39 -040010448 unsigned long bar0map_len, bar1map_len, bar2map_len;
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010449 int error;
James Smart2fcee4b2010-12-15 17:57:46 -050010450 uint32_t if_type;
James Smartda0436e2009-05-22 14:51:39 -040010451
Christoph Hellwigf30e1bf2018-10-18 15:10:21 +020010452 if (!pdev)
Hannes Reinecke56de8352019-02-18 08:34:19 +010010453 return -ENODEV;
James Smartda0436e2009-05-22 14:51:39 -040010454
10455 /* Set the device DMA mask size */
Hannes Reinecke56de8352019-02-18 08:34:19 +010010456 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10457 if (error)
10458 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10459 if (error)
Christoph Hellwigf30e1bf2018-10-18 15:10:21 +020010460 return error;
James Smartda0436e2009-05-22 14:51:39 -040010461
James Smart2fcee4b2010-12-15 17:57:46 -050010462 /*
10463 * The BARs and register set definitions and offset locations are
10464 * dependent on the if_type.
10465 */
10466 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10467 &phba->sli4_hba.sli_intf.word0)) {
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010468 return -ENODEV;
James Smart2fcee4b2010-12-15 17:57:46 -050010469 }
10470
10471	/* There is no SLI3 fallback for SLI4 devices. */
10472 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10473 LPFC_SLI_INTF_VALID) {
Dick Kennedy372c1872020-06-30 14:50:00 -070010474 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart2fcee4b2010-12-15 17:57:46 -050010475 "2894 SLI_INTF reg contents invalid "
10476 "sli_intf reg 0x%x\n",
10477 phba->sli4_hba.sli_intf.word0);
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010478 return -ENODEV;
James Smart2fcee4b2010-12-15 17:57:46 -050010479 }
10480
10481 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10482 /*
10483 * Get the bus address of SLI4 device Bar regions and the
10484 * number of bytes required by each mapping. The mapping of the
10485 * particular PCI BARs regions is dependent on the type of
10486 * SLI4 device.
James Smartda0436e2009-05-22 14:51:39 -040010487 */
James Smartf5ca6f22013-09-06 12:21:09 -040010488 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10489 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10490 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
James Smart2fcee4b2010-12-15 17:57:46 -050010491
10492 /*
10493 * Map SLI4 PCI Config Space Register base to a kernel virtual
10494 * addr
10495 */
10496 phba->sli4_hba.conf_regs_memmap_p =
10497 ioremap(phba->pci_bar0_map, bar0map_len);
10498 if (!phba->sli4_hba.conf_regs_memmap_p) {
10499 dev_printk(KERN_ERR, &pdev->dev,
10500 "ioremap failed for SLI4 PCI config "
10501 "registers.\n");
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010502 return -ENODEV;
James Smart2fcee4b2010-12-15 17:57:46 -050010503 }
James Smartf5ca6f22013-09-06 12:21:09 -040010504 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
James Smart2fcee4b2010-12-15 17:57:46 -050010505 /* Set up BAR0 PCI config space register memory map */
10506 lpfc_sli4_bar0_register_memmap(phba, if_type);
James Smart1dfb5a42010-02-12 14:40:50 -050010507 } else {
10508 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10509 bar0map_len = pci_resource_len(pdev, 1);
James Smart27d6ac02018-02-22 08:18:42 -080010510 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
James Smart2fcee4b2010-12-15 17:57:46 -050010511 dev_printk(KERN_ERR, &pdev->dev,
10512 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010513 return -ENODEV;
James Smart2fcee4b2010-12-15 17:57:46 -050010514 }
10515 phba->sli4_hba.conf_regs_memmap_p =
James Smartda0436e2009-05-22 14:51:39 -040010516 ioremap(phba->pci_bar0_map, bar0map_len);
James Smart2fcee4b2010-12-15 17:57:46 -050010517 if (!phba->sli4_hba.conf_regs_memmap_p) {
10518 dev_printk(KERN_ERR, &pdev->dev,
10519 "ioremap failed for SLI4 PCI config "
10520 "registers.\n");
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010521 return -ENODEV;
James Smart2fcee4b2010-12-15 17:57:46 -050010522 }
10523 lpfc_sli4_bar0_register_memmap(phba, if_type);
James Smartda0436e2009-05-22 14:51:39 -040010524 }
10525
James Smarte4b97942017-11-20 16:00:31 -080010526 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10527 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10528 /*
10529 * Map SLI4 if type 0 HBA Control Register base to a
10530 * kernel virtual address and setup the registers.
10531 */
10532 phba->pci_bar1_map = pci_resource_start(pdev,
10533 PCI_64BIT_BAR2);
10534 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10535 phba->sli4_hba.ctrl_regs_memmap_p =
10536 ioremap(phba->pci_bar1_map,
10537 bar1map_len);
10538 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10539 dev_err(&pdev->dev,
10540 "ioremap failed for SLI4 HBA "
10541 "control registers.\n");
10542 error = -ENOMEM;
10543 goto out_iounmap_conf;
10544 }
10545 phba->pci_bar2_memmap_p =
10546 phba->sli4_hba.ctrl_regs_memmap_p;
James Smart27d6ac02018-02-22 08:18:42 -080010547 lpfc_sli4_bar1_register_memmap(phba, if_type);
James Smarte4b97942017-11-20 16:00:31 -080010548 } else {
10549 error = -ENOMEM;
James Smart2fcee4b2010-12-15 17:57:46 -050010550 goto out_iounmap_conf;
10551 }
James Smartda0436e2009-05-22 14:51:39 -040010552 }
10553
James Smart27d6ac02018-02-22 08:18:42 -080010554 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10555 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10556 /*
10557 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10558 * virtual address and setup the registers.
10559 */
10560 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10561 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10562 phba->sli4_hba.drbl_regs_memmap_p =
10563 ioremap(phba->pci_bar1_map, bar1map_len);
10564 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10565 dev_err(&pdev->dev,
10566 "ioremap failed for SLI4 HBA doorbell registers.\n");
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010567 error = -ENOMEM;
James Smart27d6ac02018-02-22 08:18:42 -080010568 goto out_iounmap_conf;
10569 }
10570 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10571 lpfc_sli4_bar1_register_memmap(phba, if_type);
10572 }
10573
James Smarte4b97942017-11-20 16:00:31 -080010574 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10575 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10576 /*
10577 * Map SLI4 if type 0 HBA Doorbell Register base to
10578 * a kernel virtual address and setup the registers.
10579 */
10580 phba->pci_bar2_map = pci_resource_start(pdev,
10581 PCI_64BIT_BAR4);
10582 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10583 phba->sli4_hba.drbl_regs_memmap_p =
10584 ioremap(phba->pci_bar2_map,
10585 bar2map_len);
10586 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10587 dev_err(&pdev->dev,
10588 "ioremap failed for SLI4 HBA"
10589 " doorbell registers.\n");
10590 error = -ENOMEM;
10591 goto out_iounmap_ctrl;
10592 }
10593 phba->pci_bar4_memmap_p =
10594 phba->sli4_hba.drbl_regs_memmap_p;
10595 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10596 if (error)
10597 goto out_iounmap_all;
10598 } else {
10599 error = -ENOMEM;
James Smart2fcee4b2010-12-15 17:57:46 -050010600 goto out_iounmap_all;
James Smarte4b97942017-11-20 16:00:31 -080010601 }
James Smartda0436e2009-05-22 14:51:39 -040010602 }
10603
James Smart1351e692018-02-22 08:18:43 -080010604 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10605 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10606 /*
10607 * Map SLI4 if type 6 HBA DPP Register base to a kernel
10608 * virtual address and setup the registers.
10609 */
10610 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10611 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10612 phba->sli4_hba.dpp_regs_memmap_p =
10613 ioremap(phba->pci_bar2_map, bar2map_len);
10614 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10615 dev_err(&pdev->dev,
10616 "ioremap failed for SLI4 HBA dpp registers.\n");
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010617 error = -ENOMEM;
James Smart1351e692018-02-22 08:18:43 -080010618 goto out_iounmap_ctrl;
10619 }
10620 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10621 }
10622
James Smartb71413d2018-02-22 08:18:40 -080010623	/* Set up the EQ/CQ register handling functions now */
James Smart27d6ac02018-02-22 08:18:42 -080010624 switch (if_type) {
10625 case LPFC_SLI_INTF_IF_TYPE_0:
10626 case LPFC_SLI_INTF_IF_TYPE_2:
James Smartb71413d2018-02-22 08:18:40 -080010627 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
James Smart32517fc2019-01-28 11:14:33 -080010628 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10629 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
James Smart27d6ac02018-02-22 08:18:42 -080010630 break;
10631 case LPFC_SLI_INTF_IF_TYPE_6:
10632 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
James Smart32517fc2019-01-28 11:14:33 -080010633 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10634 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
James Smart27d6ac02018-02-22 08:18:42 -080010635 break;
10636 default:
10637 break;
James Smartb71413d2018-02-22 08:18:40 -080010638 }
10639
James Smartda0436e2009-05-22 14:51:39 -040010640 return 0;
10641
10642out_iounmap_all:
10643 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10644out_iounmap_ctrl:
10645 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10646out_iounmap_conf:
10647 iounmap(phba->sli4_hba.conf_regs_memmap_p);
Dan Carpenter3a487ff2019-03-07 08:33:44 +030010648
James Smartda0436e2009-05-22 14:51:39 -040010649 return error;
10650}
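/*
 * Editor's note -- BAR usage as implemented above, summarized from the code
 * for readability:
 *
 *	if_type 0:  BAR0 -> SLI4 config registers
 *	            BAR2 -> HBA control registers
 *	            BAR4 -> doorbell registers
 *	if_type 2:  BAR0 -> SLI4 config registers (control and doorbell access
 *	            goes through the BAR0 register set)
 *	if_type 6:  BAR0 -> SLI4 config registers
 *	            BAR2 -> doorbell registers
 *	            BAR4 -> DPP registers (optional)
 *
 * PCI_64BIT_BAR0/BAR2/BAR4 above are, presumably, the driver's names for the
 * three 64-bit BARs, i.e. PCI BAR indices 0, 2 and 4.
 */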
10651
10652/**
10653 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10654 * @phba: pointer to lpfc hba data structure.
10655 *
10656 * This routine is invoked to unset the PCI device memory space for device
10657 * with SLI-4 interface spec.
10658 **/
10659static void
10660lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10661{
James Smart2e90f4b2011-12-13 13:22:37 -050010662 uint32_t if_type;
10663 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
James Smartda0436e2009-05-22 14:51:39 -040010664
James Smart2e90f4b2011-12-13 13:22:37 -050010665 switch (if_type) {
10666 case LPFC_SLI_INTF_IF_TYPE_0:
10667 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10668 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10669 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10670 break;
10671 case LPFC_SLI_INTF_IF_TYPE_2:
10672 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10673 break;
James Smart27d6ac02018-02-22 08:18:42 -080010674 case LPFC_SLI_INTF_IF_TYPE_6:
10675 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10676 iounmap(phba->sli4_hba.conf_regs_memmap_p);
James Smart0b439192019-12-18 15:58:05 -080010677 if (phba->sli4_hba.dpp_regs_memmap_p)
10678 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
James Smart27d6ac02018-02-22 08:18:42 -080010679 break;
James Smart2e90f4b2011-12-13 13:22:37 -050010680 case LPFC_SLI_INTF_IF_TYPE_1:
10681 default:
10682 dev_printk(KERN_ERR, &phba->pcidev->dev,
10683 "FATAL - unsupported SLI4 interface type - %d\n",
10684 if_type);
10685 break;
10686 }
James Smartda0436e2009-05-22 14:51:39 -040010687}
10688
10689/**
James Smart3772a992009-05-22 14:50:54 -040010690 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10691 * @phba: pointer to lpfc hba data structure.
10692 *
10693 * This routine is invoked to enable the MSI-X interrupt vectors to device
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010694 * with SLI-3 interface specs.
James Smart3772a992009-05-22 14:50:54 -040010695 *
10696 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020010697 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -040010698 * other values - error
10699 **/
10700static int
10701lpfc_sli_enable_msix(struct lpfc_hba *phba)
10702{
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010703 int rc;
James Smart3772a992009-05-22 14:50:54 -040010704 LPFC_MBOXQ_t *pmb;
10705
10706 /* Set up MSI-X multi-message vectors */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010707 rc = pci_alloc_irq_vectors(phba->pcidev,
10708 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10709 if (rc < 0) {
James Smart3772a992009-05-22 14:50:54 -040010710 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10711 "0420 PCI enable MSI-X failed (%d)\n", rc);
Alexander Gordeev029165a2014-07-16 20:05:15 +020010712 goto vec_fail_out;
James Smart3772a992009-05-22 14:50:54 -040010713 }
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010714
James Smart3772a992009-05-22 14:50:54 -040010715 /*
10716 * Assign MSI-X vectors to interrupt handlers
10717 */
10718
10719 /* vector-0 is associated to slow-path handler */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010720 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
James Smarted243d32015-05-21 13:55:25 -040010721 &lpfc_sli_sp_intr_handler, 0,
James Smart3772a992009-05-22 14:50:54 -040010722 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10723 if (rc) {
10724 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10725 "0421 MSI-X slow-path request_irq failed "
10726 "(%d)\n", rc);
10727 goto msi_fail_out;
10728 }
10729
10730 /* vector-1 is associated to fast-path handler */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010731 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
James Smarted243d32015-05-21 13:55:25 -040010732 &lpfc_sli_fp_intr_handler, 0,
James Smart3772a992009-05-22 14:50:54 -040010733 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10734
10735 if (rc) {
10736 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10737 "0429 MSI-X fast-path request_irq failed "
10738 "(%d)\n", rc);
10739 goto irq_fail_out;
10740 }
10741
10742 /*
10743 * Configure HBA MSI-X attention conditions to messages
10744 */
10745 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10746
10747 if (!pmb) {
10748 rc = -ENOMEM;
Dick Kennedy372c1872020-06-30 14:50:00 -070010749 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040010750 "0474 Unable to allocate memory for issuing "
10751 "MBOX_CONFIG_MSI command\n");
10752 goto mem_fail_out;
10753 }
10754 rc = lpfc_config_msi(phba, pmb);
10755 if (rc)
10756 goto mbx_fail_out;
10757 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10758 if (rc != MBX_SUCCESS) {
10759 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10760 "0351 Config MSI mailbox command failed, "
10761 "mbxCmd x%x, mbxStatus x%x\n",
10762 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10763 goto mbx_fail_out;
10764 }
10765
10766 /* Free memory allocated for mailbox command */
10767 mempool_free(pmb, phba->mbox_mem_pool);
10768 return rc;
10769
10770mbx_fail_out:
10771 /* Free memory allocated for mailbox command */
10772 mempool_free(pmb, phba->mbox_mem_pool);
10773
10774mem_fail_out:
10775 /* free the irq already requested */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010776 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
James Smart3772a992009-05-22 14:50:54 -040010777
10778irq_fail_out:
10779 /* free the irq already requested */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010780 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
James Smart3772a992009-05-22 14:50:54 -040010781
10782msi_fail_out:
10783 /* Unconfigure MSI-X capability structure */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010784 pci_free_irq_vectors(phba->pcidev);
Alexander Gordeev029165a2014-07-16 20:05:15 +020010785
10786vec_fail_out:
James Smart3772a992009-05-22 14:50:54 -040010787 return rc;
10788}
10789
10790/**
James Smart3772a992009-05-22 14:50:54 -040010791 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10792 * @phba: pointer to lpfc hba data structure.
10793 *
10794 * This routine is invoked to enable the MSI interrupt mode to device with
10795 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10796 * enable the MSI vector. The device driver is responsible for calling
10797 * request_irq() to register the MSI vector with an interrupt handler, which
10798 * is done in this function.
10799 *
10800 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020010801 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -040010802 * other values - error
10803 */
10804static int
10805lpfc_sli_enable_msi(struct lpfc_hba *phba)
10806{
10807 int rc;
10808
10809 rc = pci_enable_msi(phba->pcidev);
10810 if (!rc)
10811 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10812 "0462 PCI enable MSI mode success.\n");
10813 else {
10814 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10815 "0471 PCI enable MSI mode failed (%d)\n", rc);
10816 return rc;
10817 }
10818
10819 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
James Smarted243d32015-05-21 13:55:25 -040010820 0, LPFC_DRIVER_NAME, phba);
James Smart3772a992009-05-22 14:50:54 -040010821 if (rc) {
10822 pci_disable_msi(phba->pcidev);
10823 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10824 "0478 MSI request_irq failed (%d)\n", rc);
10825 }
10826 return rc;
10827}
10828
10829/**
James Smart3772a992009-05-22 14:50:54 -040010830 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10831 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +010010832 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
James Smart3772a992009-05-22 14:50:54 -040010833 *
10834 * This routine is invoked to enable device interrupt and associate driver's
10835 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
10836 * spec. Depending on the interrupt mode configured for the driver, the driver
10837 * will try to fall back from the configured interrupt mode to an interrupt
10838 * mode which is supported by the platform, kernel, and device in the order
10839 * of:
10840 * MSI-X -> MSI -> IRQ.
10841 *
10842 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020010843 * 0 - successful
James Smart3772a992009-05-22 14:50:54 -040010844 * other values - error
10845 **/
10846static uint32_t
10847lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10848{
10849 uint32_t intr_mode = LPFC_INTR_ERROR;
10850 int retval;
10851
James Smartd2f25472021-01-04 10:02:27 -080010852 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10853 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10854 if (retval)
10855 return intr_mode;
10856 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
10857
James Smart3772a992009-05-22 14:50:54 -040010858 if (cfg_mode == 2) {
James Smartd2f25472021-01-04 10:02:27 -080010859 /* Now, try to enable MSI-X interrupt mode */
10860 retval = lpfc_sli_enable_msix(phba);
James Smart3772a992009-05-22 14:50:54 -040010861 if (!retval) {
James Smartd2f25472021-01-04 10:02:27 -080010862 /* Indicate initialization to MSI-X mode */
10863 phba->intr_type = MSIX;
10864 intr_mode = 2;
James Smart3772a992009-05-22 14:50:54 -040010865 }
10866 }
10867
10868 /* Fallback to MSI if MSI-X initialization failed */
10869 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10870 retval = lpfc_sli_enable_msi(phba);
10871 if (!retval) {
10872 /* Indicate initialization to MSI mode */
10873 phba->intr_type = MSI;
10874 intr_mode = 1;
10875 }
10876 }
10877
10878	/* Fallback to INTx if both MSI-X/MSI initialization failed */
10879 if (phba->intr_type == NONE) {
10880 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10881 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10882 if (!retval) {
10883 /* Indicate initialization to INTx mode */
10884 phba->intr_type = INTx;
10885 intr_mode = 0;
10886 }
10887 }
10888 return intr_mode;
10889}
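/*
 * Editor's note -- a minimal, hypothetical caller sketch for the fallback
 * logic above; cfg_use_msi and intr_mode are assumed fields that normally
 * carry the lpfc_use_msi module parameter (2 = MSI-X, 1 = MSI, 0 = INTx) and
 * the mode actually granted:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR) {
 *		... no interrupt mode could be enabled, fail the attach ...
 *	}
 *	phba->intr_mode = intr_mode;      (remember what was actually granted)
 */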
10890
10891/**
10892 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10893 * @phba: pointer to lpfc hba data structure.
10894 *
10895 * This routine is invoked to disable device interrupt and disassociate the
10896 * driver's interrupt handler(s) from interrupt vector(s) to device with
10897 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10898 * release the interrupt vector(s) for the message signaled interrupt.
10899 **/
10900static void
10901lpfc_sli_disable_intr(struct lpfc_hba *phba)
10902{
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010903 int nr_irqs, i;
10904
James Smart3772a992009-05-22 14:50:54 -040010905 if (phba->intr_type == MSIX)
Christoph Hellwig45ffac12017-02-12 13:52:26 -080010906 nr_irqs = LPFC_MSIX_VECTORS;
10907 else
10908 nr_irqs = 1;
10909
10910 for (i = 0; i < nr_irqs; i++)
10911 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10912 pci_free_irq_vectors(phba->pcidev);
James Smart3772a992009-05-22 14:50:54 -040010913
10914 /* Reset interrupt management states */
10915 phba->intr_type = NONE;
10916 phba->sli.slistat.sli_intr = 0;
James Smart3772a992009-05-22 14:50:54 -040010917}
10918
10919/**
James Smart657add42019-05-21 17:49:06 -070010920 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
James Smart6a828b02019-01-28 11:14:31 -080010921 * @phba: pointer to lpfc hba data structure.
10922 * @id: EQ vector index or Hardware Queue index
10923 * @match: LPFC_FIND_BY_EQ = match by EQ
10924 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
James Smart657add42019-05-21 17:49:06 -070010925 * Return the CPU that matches the selection criteria
James Smart6a828b02019-01-28 11:14:31 -080010926 */
10927static uint16_t
10928lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10929{
10930 struct lpfc_vector_map_info *cpup;
10931 int cpu;
10932
James Smart657add42019-05-21 17:49:06 -070010933 /* Loop through all CPUs */
James Smart222e9232019-01-28 11:14:35 -080010934 for_each_present_cpu(cpu) {
10935 cpup = &phba->sli4_hba.cpu_map[cpu];
James Smart657add42019-05-21 17:49:06 -070010936
10937 /* If we are matching by EQ, there may be multiple CPUs using
10938		 * the same vector, so select the one with
10939 * LPFC_CPU_FIRST_IRQ set.
10940 */
James Smart6a828b02019-01-28 11:14:31 -080010941 if ((match == LPFC_FIND_BY_EQ) &&
James Smart657add42019-05-21 17:49:06 -070010942 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
James Smart6a828b02019-01-28 11:14:31 -080010943 (cpup->eq == id))
10944 return cpu;
James Smart657add42019-05-21 17:49:06 -070010945
10946 /* If matching by HDWQ, select the first CPU that matches */
James Smart6a828b02019-01-28 11:14:31 -080010947 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10948 return cpu;
James Smart6a828b02019-01-28 11:14:31 -080010949 }
10950 return 0;
10951}
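/*
 * Editor's note: the queue setup code earlier in this file uses this helper
 * as, for example,
 *
 *	cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
 *	cpup = &phba->sli4_hba.cpu_map[cpu];
 *
 * to translate a hardware queue index back to a CPU (and from there to its
 * cpu_map entry); LPFC_FIND_BY_EQ does the same for an EQ/IRQ vector index.
 */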
10952
James Smart6a828b02019-01-28 11:14:31 -080010953#ifdef CONFIG_X86
10954/**
10955 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10956 * @phba: pointer to lpfc hba data structure.
10957 * @cpu: CPU map index
10958 * @phys_id: CPU package physical id
10959 * @core_id: CPU core id
10960 */
10961static int
10962lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10963 uint16_t phys_id, uint16_t core_id)
10964{
10965 struct lpfc_vector_map_info *cpup;
10966 int idx;
10967
James Smart222e9232019-01-28 11:14:35 -080010968 for_each_present_cpu(idx) {
10969 cpup = &phba->sli4_hba.cpu_map[idx];
James Smart6a828b02019-01-28 11:14:31 -080010970 /* Does the cpup match the one we are looking for */
10971 if ((cpup->phys_id == phys_id) &&
10972 (cpup->core_id == core_id) &&
James Smart222e9232019-01-28 11:14:35 -080010973 (cpu != idx))
James Smart6a828b02019-01-28 11:14:31 -080010974 return 1;
James Smart6a828b02019-01-28 11:14:31 -080010975 }
10976 return 0;
10977}
10978#endif
10979
James Smartdcaa2132019-11-04 16:57:06 -080010980/*
10981 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
10982 * @phba: pointer to lpfc hba data structure.
10983 * @eqidx: index for eq and irq vector
10984 * @flag: flags to set for vector_map structure
10985 * @cpu: cpu used to index vector_map structure
10986 *
10987 * The routine assigns eq info into vector_map structure
10988 */
10989static inline void
10990lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10991 unsigned int cpu)
10992{
10993 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10994 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10995
10996 cpup->eq = eqidx;
10997 cpup->flag |= flag;
10998
10999 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11000 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
11001 cpu, eqhdl->irq, cpup->eq, cpup->flag);
11002}
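/*
 * Editor's note -- sketch of how the helper above is typically driven when
 * the MSI-X vectors are set up (an illustration only; the actual loop lives
 * in the interrupt enable path, which is not shown here):
 *
 *	const struct cpumask *aff_mask;
 *	unsigned int cpu;
 *
 *	aff_mask = pci_irq_get_affinity(phba->pcidev, index);
 *	cpu = cpumask_first(aff_mask);
 *	lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, cpu);
 *
 * i.e. the first CPU in a vector's affinity mask is tagged with
 * LPFC_CPU_FIRST_IRQ, which is what lpfc_cpu_affinity_check() and the EQ
 * creation loop key off of.
 */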
11003
11004/**
11005 * lpfc_cpu_map_array_init - Initialize cpu_map structure
11006 * @phba: pointer to lpfc hba data structure.
11007 *
11008 * The routine initializes the cpu_map array structure
11009 */
11010static void
11011lpfc_cpu_map_array_init(struct lpfc_hba *phba)
11012{
11013 struct lpfc_vector_map_info *cpup;
11014 struct lpfc_eq_intr_info *eqi;
11015 int cpu;
11016
11017 for_each_possible_cpu(cpu) {
11018 cpup = &phba->sli4_hba.cpu_map[cpu];
11019 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
11020 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
11021 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
11022 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
11023 cpup->flag = 0;
11024 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
11025 INIT_LIST_HEAD(&eqi->list);
11026 eqi->icnt = 0;
11027 }
11028}
11029
11030/**
11031 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
11032 * @phba: pointer to lpfc hba data structure.
11033 *
11034 * The routine initializes the hba_eq_hdl array structure
11035 */
11036static void
11037lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
11038{
11039 struct lpfc_hba_eq_hdl *eqhdl;
11040 int i;
11041
11042 for (i = 0; i < phba->cfg_irq_chann; i++) {
11043 eqhdl = lpfc_get_eq_hdl(i);
11044 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
11045 eqhdl->phba = phba;
11046 }
11047}
11048
James Smart6a828b02019-01-28 11:14:31 -080011049/**
James Smart895427b2017-02-12 13:52:30 -080011050 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
James Smart7bb03bb2013-04-17 20:19:16 -040011051 * @phba: pointer to lpfc hba data structure.
James Smart895427b2017-02-12 13:52:30 -080011052 * @vectors: number of msix vectors allocated.
James Smart7bb03bb2013-04-17 20:19:16 -040011053 *
James Smart895427b2017-02-12 13:52:30 -080011054 * The routine will figure out the CPU affinity assignment for every
James Smart6a828b02019-01-28 11:14:31 -080011055 * MSI-X vector allocated for the HBA.
James Smart895427b2017-02-12 13:52:30 -080011056 * In addition, the CPU to IO channel mapping will be calculated
11057 * and the phba->sli4_hba.cpu_map array will reflect this.
James Smart7bb03bb2013-04-17 20:19:16 -040011058 */
James Smart895427b2017-02-12 13:52:30 -080011059static void
11060lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
James Smart7bb03bb2013-04-17 20:19:16 -040011061{
James Smart3ad348d2019-08-14 16:56:43 -070011062 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
James Smart6a828b02019-01-28 11:14:31 -080011063 int max_phys_id, min_phys_id;
11064 int max_core_id, min_core_id;
James Smart7bb03bb2013-04-17 20:19:16 -040011065 struct lpfc_vector_map_info *cpup;
James Smartd9954a22019-05-21 17:49:05 -070011066 struct lpfc_vector_map_info *new_cpup;
James Smart7bb03bb2013-04-17 20:19:16 -040011067#ifdef CONFIG_X86
11068 struct cpuinfo_x86 *cpuinfo;
11069#endif
James Smart840eda92020-03-22 11:13:00 -070011070#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11071 struct lpfc_hdwq_stat *c_stat;
11072#endif
James Smart7bb03bb2013-04-17 20:19:16 -040011073
James Smart6a828b02019-01-28 11:14:31 -080011074 max_phys_id = 0;
James Smartd9954a22019-05-21 17:49:05 -070011075 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
James Smart6a828b02019-01-28 11:14:31 -080011076 max_core_id = 0;
James Smartd9954a22019-05-21 17:49:05 -070011077 min_core_id = LPFC_VECTOR_MAP_EMPTY;
James Smart7bb03bb2013-04-17 20:19:16 -040011078
11079 /* Update CPU map with physical id and core id of each CPU */
James Smart222e9232019-01-28 11:14:35 -080011080 for_each_present_cpu(cpu) {
11081 cpup = &phba->sli4_hba.cpu_map[cpu];
James Smart7bb03bb2013-04-17 20:19:16 -040011082#ifdef CONFIG_X86
11083 cpuinfo = &cpu_data(cpu);
11084 cpup->phys_id = cpuinfo->phys_proc_id;
11085 cpup->core_id = cpuinfo->cpu_core_id;
James Smartd9954a22019-05-21 17:49:05 -070011086 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
11087 cpup->flag |= LPFC_CPU_MAP_HYPER;
James Smart7bb03bb2013-04-17 20:19:16 -040011088#else
11089 /* No distinction between CPUs for other platforms */
11090 cpup->phys_id = 0;
James Smart6a828b02019-01-28 11:14:31 -080011091 cpup->core_id = cpu;
James Smart7bb03bb2013-04-17 20:19:16 -040011092#endif
James Smart7bb03bb2013-04-17 20:19:16 -040011093
James Smartb3295c22019-01-28 11:14:30 -080011094 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart3ad348d2019-08-14 16:56:43 -070011095 "3328 CPU %d physid %d coreid %d flag x%x\n",
11096 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
James Smart6a828b02019-01-28 11:14:31 -080011097
11098 if (cpup->phys_id > max_phys_id)
11099 max_phys_id = cpup->phys_id;
11100 if (cpup->phys_id < min_phys_id)
11101 min_phys_id = cpup->phys_id;
11102
11103 if (cpup->core_id > max_core_id)
11104 max_core_id = cpup->core_id;
11105 if (cpup->core_id < min_core_id)
11106 min_core_id = cpup->core_id;
James Smart7bb03bb2013-04-17 20:19:16 -040011107 }
James Smartb3295c22019-01-28 11:14:30 -080011108
James Smartd9954a22019-05-21 17:49:05 -070011109	/* After looking at each irq vector assigned to this pcidev, it's
11110 * possible to see that not ALL CPUs have been accounted for.
James Smart657add42019-05-21 17:49:06 -070011111 * Next we will set any unassigned (unaffinitized) cpu map
 11112	 * entries to an IRQ on the same phys_id.
James Smartd9954a22019-05-21 17:49:05 -070011113 */
11114 first_cpu = cpumask_first(cpu_present_mask);
11115 start_cpu = first_cpu;
11116
11117 for_each_present_cpu(cpu) {
11118 cpup = &phba->sli4_hba.cpu_map[cpu];
11119
11120 /* Is this CPU entry unassigned */
11121 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11122 /* Mark CPU as IRQ not assigned by the kernel */
11123 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11124
James Smart657add42019-05-21 17:49:06 -070011125			/* If so, find a new_cpup that's on the SAME
James Smartd9954a22019-05-21 17:49:05 -070011126 * phys_id as cpup. start_cpu will start where we
 11127			 * left off so all unassigned entries don't get assigned
11128 * the IRQ of the first entry.
11129 */
11130 new_cpu = start_cpu;
11131 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11132 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11133 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
James Smartdcaa2132019-11-04 16:57:06 -080011134 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
James Smartd9954a22019-05-21 17:49:05 -070011135 (new_cpup->phys_id == cpup->phys_id))
11136 goto found_same;
11137 new_cpu = cpumask_next(
11138 new_cpu, cpu_present_mask);
11139 if (new_cpu == nr_cpumask_bits)
11140 new_cpu = first_cpu;
11141 }
11142 /* At this point, we leave the CPU as unassigned */
11143 continue;
11144found_same:
11145 /* We found a matching phys_id, so copy the IRQ info */
11146 cpup->eq = new_cpup->eq;
James Smartd9954a22019-05-21 17:49:05 -070011147
 11148			/* Bump start_cpu to the next slot to minimize the
11149 * chance of having multiple unassigned CPU entries
11150 * selecting the same IRQ.
11151 */
11152 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11153 if (start_cpu == nr_cpumask_bits)
11154 start_cpu = first_cpu;
11155
James Smart657add42019-05-21 17:49:06 -070011156 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smartd9954a22019-05-21 17:49:05 -070011157 "3337 Set Affinity: CPU %d "
James Smartdcaa2132019-11-04 16:57:06 -080011158 "eq %d from peer cpu %d same "
James Smartd9954a22019-05-21 17:49:05 -070011159 "phys_id (%d)\n",
James Smartdcaa2132019-11-04 16:57:06 -080011160 cpu, cpup->eq, new_cpu,
11161 cpup->phys_id);
James Smartd9954a22019-05-21 17:49:05 -070011162 }
11163 }
11164
 11165	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
11166 start_cpu = first_cpu;
11167
11168 for_each_present_cpu(cpu) {
11169 cpup = &phba->sli4_hba.cpu_map[cpu];
11170
11171 /* Is this entry unassigned */
11172 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11173 /* Mark it as IRQ not assigned by the kernel */
11174 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11175
James Smart657add42019-05-21 17:49:06 -070011176			/* If so, find a new_cpup that's on ANY phys_id
James Smartd9954a22019-05-21 17:49:05 -070011177 * as the cpup. start_cpu will start where we
11178 * left off so all unassigned entries don't get
11179 * assigned the IRQ of the first entry.
11180 */
11181 new_cpu = start_cpu;
11182 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11183 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11184 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
James Smartdcaa2132019-11-04 16:57:06 -080011185 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
James Smartd9954a22019-05-21 17:49:05 -070011186 goto found_any;
11187 new_cpu = cpumask_next(
11188 new_cpu, cpu_present_mask);
11189 if (new_cpu == nr_cpumask_bits)
11190 new_cpu = first_cpu;
11191 }
11192 /* We should never leave an entry unassigned */
11193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11194 "3339 Set Affinity: CPU %d "
James Smartdcaa2132019-11-04 16:57:06 -080011195 "eq %d UNASSIGNED\n",
 11196					cpu, cpup->eq);
James Smartd9954a22019-05-21 17:49:05 -070011197 continue;
11198found_any:
11199 /* We found an available entry, copy the IRQ info */
11200 cpup->eq = new_cpup->eq;
James Smartd9954a22019-05-21 17:49:05 -070011201
 11202			/* Bump start_cpu to the next slot to minimize the
11203 * chance of having multiple unassigned CPU entries
11204 * selecting the same IRQ.
11205 */
11206 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11207 if (start_cpu == nr_cpumask_bits)
11208 start_cpu = first_cpu;
11209
James Smart657add42019-05-21 17:49:06 -070011210 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smartd9954a22019-05-21 17:49:05 -070011211 "3338 Set Affinity: CPU %d "
James Smartdcaa2132019-11-04 16:57:06 -080011212 "eq %d from peer cpu %d (%d/%d)\n",
11213 cpu, cpup->eq, new_cpu,
James Smartd9954a22019-05-21 17:49:05 -070011214 new_cpup->phys_id, new_cpup->core_id);
11215 }
11216 }
James Smart657add42019-05-21 17:49:06 -070011217
James Smart3ad348d2019-08-14 16:56:43 -070011218 /* Assign hdwq indices that are unique across all cpus in the map
11219 * that are also FIRST_CPUs.
11220 */
11221 idx = 0;
11222 for_each_present_cpu(cpu) {
11223 cpup = &phba->sli4_hba.cpu_map[cpu];
11224
11225 /* Only FIRST IRQs get a hdwq index assignment. */
11226 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11227 continue;
11228
11229 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
11230 cpup->hdwq = idx;
11231 idx++;
Anton Blanchardbc2736e2020-07-13 18:39:08 +100011232 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart3ad348d2019-08-14 16:56:43 -070011233 "3333 Set Affinity: CPU %d (phys %d core %d): "
James Smartdcaa2132019-11-04 16:57:06 -080011234 "hdwq %d eq %d flg x%x\n",
James Smart3ad348d2019-08-14 16:56:43 -070011235 cpu, cpup->phys_id, cpup->core_id,
James Smartdcaa2132019-11-04 16:57:06 -080011236 cpup->hdwq, cpup->eq, cpup->flag);
James Smart3ad348d2019-08-14 16:56:43 -070011237 }
James Smartbc227dd2019-11-11 15:03:59 -080011238 /* Associate a hdwq with each cpu_map entry
James Smart657add42019-05-21 17:49:06 -070011239	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
 11240	 * hardware queues than CPUs. In that case we will just round-robin
11241 * the available hardware queues as they get assigned to CPUs.
James Smart3ad348d2019-08-14 16:56:43 -070011242 * The next_idx is the idx from the FIRST_CPU loop above to account
11243 * for irq_chann < hdwq. The idx is used for round-robin assignments
11244 * and needs to start at 0.
James Smart657add42019-05-21 17:49:06 -070011245 */
James Smart3ad348d2019-08-14 16:56:43 -070011246 next_idx = idx;
James Smart657add42019-05-21 17:49:06 -070011247 start_cpu = 0;
James Smart3ad348d2019-08-14 16:56:43 -070011248 idx = 0;
James Smart657add42019-05-21 17:49:06 -070011249 for_each_present_cpu(cpu) {
11250 cpup = &phba->sli4_hba.cpu_map[cpu];
James Smart657add42019-05-21 17:49:06 -070011251
James Smart3ad348d2019-08-14 16:56:43 -070011252 /* FIRST cpus are already mapped. */
11253 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11254 continue;
James Smart657add42019-05-21 17:49:06 -070011255
James Smart3ad348d2019-08-14 16:56:43 -070011256 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
11257 * of the unassigned cpus to the next idx so that all
11258 * hdw queues are fully utilized.
11259 */
11260 if (next_idx < phba->cfg_hdw_queue) {
11261 cpup->hdwq = next_idx;
11262 next_idx++;
11263 continue;
James Smart657add42019-05-21 17:49:06 -070011264 }
James Smart3ad348d2019-08-14 16:56:43 -070011265
11266 /* Not a First CPU and all hdw_queues are used. Reuse a
11267 * Hardware Queue for another CPU, so be smart about it
11268 * and pick one that has its IRQ/EQ mapped to the same phys_id
11269 * (CPU package) and core_id.
11270 */
11271 new_cpu = start_cpu;
11272 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11273 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11274 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11275 new_cpup->phys_id == cpup->phys_id &&
11276 new_cpup->core_id == cpup->core_id) {
11277 goto found_hdwq;
11278 }
11279 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11280 if (new_cpu == nr_cpumask_bits)
11281 new_cpu = first_cpu;
11282 }
11283
11284 /* If we can't match both phys_id and core_id,
11285 * settle for just a phys_id match.
11286 */
11287 new_cpu = start_cpu;
11288 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11289 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11290 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11291 new_cpup->phys_id == cpup->phys_id)
11292 goto found_hdwq;
11293
11294 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11295 if (new_cpu == nr_cpumask_bits)
11296 new_cpu = first_cpu;
11297 }
11298
11299 /* Otherwise just round robin on cfg_hdw_queue */
11300 cpup->hdwq = idx % phba->cfg_hdw_queue;
11301 idx++;
11302 goto logit;
11303 found_hdwq:
 11304		/* We found an available entry, reuse its hdwq */
11305 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11306 if (start_cpu == nr_cpumask_bits)
11307 start_cpu = first_cpu;
11308 cpup->hdwq = new_cpup->hdwq;
11309 logit:
Anton Blanchardbc2736e2020-07-13 18:39:08 +100011310 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart657add42019-05-21 17:49:06 -070011311 "3335 Set Affinity: CPU %d (phys %d core %d): "
James Smartdcaa2132019-11-04 16:57:06 -080011312 "hdwq %d eq %d flg x%x\n",
James Smart657add42019-05-21 17:49:06 -070011313 cpu, cpup->phys_id, cpup->core_id,
James Smartdcaa2132019-11-04 16:57:06 -080011314 cpup->hdwq, cpup->eq, cpup->flag);
James Smart657add42019-05-21 17:49:06 -070011315 }
11316
James Smartbc227dd2019-11-11 15:03:59 -080011317 /*
11318 * Initialize the cpu_map slots for not-present cpus in case
11319 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
11320 */
11321 idx = 0;
11322 for_each_possible_cpu(cpu) {
11323 cpup = &phba->sli4_hba.cpu_map[cpu];
James Smart840eda92020-03-22 11:13:00 -070011324#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11325 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11326 c_stat->hdwq_no = cpup->hdwq;
11327#endif
James Smartbc227dd2019-11-11 15:03:59 -080011328 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11329 continue;
11330
11331 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
James Smart840eda92020-03-22 11:13:00 -070011332#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11333 c_stat->hdwq_no = cpup->hdwq;
11334#endif
James Smartbc227dd2019-11-11 15:03:59 -080011335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11336 "3340 Set Affinity: not present "
11337 "CPU %d hdwq %d\n",
11338 cpu, cpup->hdwq);
James Smart657add42019-05-21 17:49:06 -070011339 }
11340
11341 /* The cpu_map array will be used later during initialization
11342 * when EQ / CQ / WQs are allocated and configured.
11343 */
James Smartb3295c22019-01-28 11:14:30 -080011344 return;
James Smart7bb03bb2013-04-17 20:19:16 -040011345}
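
/* Editor's sketch (illustration only, not driver code): when there are fewer
 * hardware queues than CPUs, the fallback above reduces to a modulo
 * round-robin. The helper below shows that assignment rule in isolation; the
 * name and parameters are hypothetical, and num_hdwq is assumed nonzero
 * (the driver guarantees at least one hardware queue).
 */
static inline uint16_t demo_round_robin_hdwq(unsigned int cpu_idx,
					     uint16_t num_hdwq)
{
	/* CPUs beyond the first num_hdwq simply reuse queues in rotation */
	return cpu_idx % num_hdwq;
}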
James Smart7bb03bb2013-04-17 20:19:16 -040011346
11347/**
James Smart93a4d6f2019-11-04 16:57:05 -080011348 * lpfc_cpuhp_get_eq
11349 *
11350 * @phba: pointer to lpfc hba data structure.
11351 * @cpu: cpu going offline
Lee Jonesfe614ac2020-07-23 13:24:22 +010011352 * @eqlist: eq list to append to
James Smart93a4d6f2019-11-04 16:57:05 -080011353 */
James Smarta99c8072020-01-27 16:23:06 -080011354static int
James Smart93a4d6f2019-11-04 16:57:05 -080011355lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11356 struct list_head *eqlist)
11357{
James Smart93a4d6f2019-11-04 16:57:05 -080011358 const struct cpumask *maskp;
11359 struct lpfc_queue *eq;
James Smarta99c8072020-01-27 16:23:06 -080011360 struct cpumask *tmp;
James Smart93a4d6f2019-11-04 16:57:05 -080011361 u16 idx;
11362
James Smarta99c8072020-01-27 16:23:06 -080011363 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11364 if (!tmp)
11365 return -ENOMEM;
11366
James Smart93a4d6f2019-11-04 16:57:05 -080011367 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11368 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11369 if (!maskp)
11370 continue;
11371 /*
11372 * if irq is not affinitized to the cpu going
 11373		 * offline then we don't need to poll the eq attached
11374 * to it.
11375 */
James Smarta99c8072020-01-27 16:23:06 -080011376 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
James Smart93a4d6f2019-11-04 16:57:05 -080011377 continue;
 11378		/* get the cpus that are online and are
 11379		 * affinitized to this irq vector. If the count
 11380		 * is more than 1 then cpuhp is not going to
 11381		 * shut down this vector. Since this cpu has not
11382 * gone offline yet, we need >1.
11383 */
James Smarta99c8072020-01-27 16:23:06 -080011384 cpumask_and(tmp, maskp, cpu_online_mask);
11385 if (cpumask_weight(tmp) > 1)
James Smart93a4d6f2019-11-04 16:57:05 -080011386 continue;
11387
11388 /* Now that we have an irq to shutdown, get the eq
11389 * mapped to this irq. Note: multiple hdwq's in
11390 * the software can share an eq, but eventually
 11391		 * only one eq will be mapped to this vector.
11392 */
James Smartdcaa2132019-11-04 16:57:06 -080011393 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11394 list_add(&eq->_poll_list, eqlist);
James Smart93a4d6f2019-11-04 16:57:05 -080011395 }
James Smarta99c8072020-01-27 16:23:06 -080011396 kfree(tmp);
11397 return 0;
James Smart93a4d6f2019-11-04 16:57:05 -080011398}
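
/* Editor's sketch (not compiled into the driver): the selection above keys on
 * whether the offlining cpu is the last online cpu in a vector's affinity
 * mask. Restated as a standalone predicate, with the mask passed in rather
 * than taken from pci_irq_get_affinity():
 */
static bool demo_eq_needs_polling(const struct cpumask *vec_mask,
				  unsigned int offlining_cpu)
{
	unsigned int cpu, online_in_mask = 0;

	/* Vector not tied to this cpu at all: nothing to hand over */
	if (!cpumask_test_cpu(offlining_cpu, vec_mask))
		return false;

	for_each_cpu_and(cpu, vec_mask, cpu_online_mask)
		online_in_mask++;

	/* The offlining cpu still counts as online here, so more than one
	 * online cpu means another cpu keeps servicing the interrupt.
	 */
	return online_in_mask <= 1;
}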
11399
11400static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11401{
11402 if (phba->sli_rev != LPFC_SLI_REV4)
11403 return;
11404
11405 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11406 &phba->cpuhp);
11407 /*
11408 * unregistering the instance doesn't stop the polling
11409 * timer. Wait for the poll timer to retire.
11410 */
11411 synchronize_rcu();
11412 del_timer_sync(&phba->cpuhp_poll_timer);
11413}
11414
11415static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11416{
11417 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11418 return;
11419
11420 __lpfc_cpuhp_remove(phba);
11421}
11422
11423static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11424{
11425 if (phba->sli_rev != LPFC_SLI_REV4)
11426 return;
11427
11428 rcu_read_lock();
11429
James Smartf861f592020-03-22 11:12:54 -070011430 if (!list_empty(&phba->poll_list))
James Smart93a4d6f2019-11-04 16:57:05 -080011431 mod_timer(&phba->cpuhp_poll_timer,
11432 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
James Smart93a4d6f2019-11-04 16:57:05 -080011433
11434 rcu_read_unlock();
11435
11436 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11437 &phba->cpuhp);
11438}
11439
11440static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11441{
11442 if (phba->pport->load_flag & FC_UNLOADING) {
11443 *retval = -EAGAIN;
11444 return true;
11445 }
11446
11447 if (phba->sli_rev != LPFC_SLI_REV4) {
11448 *retval = 0;
11449 return true;
11450 }
11451
11452 /* proceed with the hotplug */
11453 return false;
11454}
11455
James Smartdcaa2132019-11-04 16:57:06 -080011456/**
11457 * lpfc_irq_set_aff - set IRQ affinity
11458 * @eqhdl: EQ handle
11459 * @cpu: cpu to set affinity
11460 *
11461 **/
11462static inline void
11463lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11464{
11465 cpumask_clear(&eqhdl->aff_mask);
11466 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11467 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11468 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11469}
11470
11471/**
11472 * lpfc_irq_clear_aff - clear IRQ affinity
11473 * @eqhdl: EQ handle
11474 *
11475 **/
11476static inline void
11477lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11478{
11479 cpumask_clear(&eqhdl->aff_mask);
11480 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
James Smartdcaa2132019-11-04 16:57:06 -080011481}
11482
11483/**
11484 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
11485 * @phba: pointer to HBA context object.
11486 * @cpu: cpu going offline/online
11487 * @offline: true, cpu is going offline. false, cpu is coming online.
11488 *
 11489	 * If cpu is going offline, we'll make a best effort to find the next
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011490 * online cpu on the phba's original_mask and migrate all offlining IRQ
11491 * affinities.
James Smartdcaa2132019-11-04 16:57:06 -080011492 *
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011493 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
James Smartdcaa2132019-11-04 16:57:06 -080011494 *
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011495 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
James Smartdcaa2132019-11-04 16:57:06 -080011496 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
11497 *
11498 **/
11499static void
11500lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11501{
11502 struct lpfc_vector_map_info *cpup;
11503 struct cpumask *aff_mask;
11504 unsigned int cpu_select, cpu_next, idx;
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011505 const struct cpumask *orig_mask;
James Smartdcaa2132019-11-04 16:57:06 -080011506
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011507 if (phba->irq_chann_mode == NORMAL_MODE)
James Smartdcaa2132019-11-04 16:57:06 -080011508 return;
11509
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011510 orig_mask = &phba->sli4_hba.irq_aff_mask;
James Smartdcaa2132019-11-04 16:57:06 -080011511
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011512 if (!cpumask_test_cpu(cpu, orig_mask))
James Smartdcaa2132019-11-04 16:57:06 -080011513 return;
11514
11515 cpup = &phba->sli4_hba.cpu_map[cpu];
11516
11517 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11518 return;
11519
11520 if (offline) {
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011521 /* Find next online CPU on original mask */
11522 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
11523 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
James Smartdcaa2132019-11-04 16:57:06 -080011524
11525 /* Found a valid CPU */
11526 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11527 /* Go through each eqhdl and ensure offlining
11528 * cpu aff_mask is migrated
11529 */
11530 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11531 aff_mask = lpfc_get_aff_mask(idx);
11532
11533 /* Migrate affinity */
11534 if (cpumask_test_cpu(cpu, aff_mask))
11535 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11536 cpu_select);
11537 }
11538 } else {
11539 /* Rely on irqbalance if no online CPUs left on NUMA */
11540 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11541 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11542 }
11543 } else {
11544 /* Migrate affinity back to this CPU */
11545 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11546 }
11547}
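
/* Editor's sketch (illustration only): one way to pick a migration target as
 * in the offline path above is to walk the original mask starting after the
 * offlining cpu, wrapping around, and take the first cpu that is still
 * online. This is a generic pattern, not the body of lpfc_next_online_cpu().
 */
static unsigned int demo_next_online_in_mask(const struct cpumask *mask,
					     unsigned int offlining_cpu)
{
	unsigned int cpu;

	for_each_cpu_wrap(cpu, mask, offlining_cpu) {
		if (cpu != offlining_cpu && cpu_online(cpu))
			return cpu;
	}
	return nr_cpu_ids;	/* none left; fall back to irqbalance */
}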
11548
James Smart93a4d6f2019-11-04 16:57:05 -080011549static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11550{
11551 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11552 struct lpfc_queue *eq, *next;
11553 LIST_HEAD(eqlist);
11554 int retval;
11555
11556 if (!phba) {
11557 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11558 return 0;
11559 }
11560
11561 if (__lpfc_cpuhp_checks(phba, &retval))
11562 return retval;
11563
James Smartdcaa2132019-11-04 16:57:06 -080011564 lpfc_irq_rebalance(phba, cpu, true);
11565
James Smarta99c8072020-01-27 16:23:06 -080011566 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11567 if (retval)
11568 return retval;
James Smart93a4d6f2019-11-04 16:57:05 -080011569
11570 /* start polling on these eq's */
11571 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11572 list_del_init(&eq->_poll_list);
11573 lpfc_sli4_start_polling(eq);
11574 }
11575
11576 return 0;
11577}
11578
11579static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11580{
11581 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11582 struct lpfc_queue *eq, *next;
11583 unsigned int n;
11584 int retval;
11585
11586 if (!phba) {
11587 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11588 return 0;
11589 }
11590
11591 if (__lpfc_cpuhp_checks(phba, &retval))
11592 return retval;
11593
James Smartdcaa2132019-11-04 16:57:06 -080011594 lpfc_irq_rebalance(phba, cpu, false);
11595
James Smart93a4d6f2019-11-04 16:57:05 -080011596 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11597 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11598 if (n == cpu)
11599 lpfc_sli4_stop_polling(eq);
11600 }
11601
11602 return 0;
11603}
11604
11605/**
James Smartda0436e2009-05-22 14:51:39 -040011606 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
11607 * @phba: pointer to lpfc hba data structure.
11608 *
11609 * This routine is invoked to enable the MSI-X interrupt vectors to device
James Smartdcaa2132019-11-04 16:57:06 -080011610 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
11611 * to cpus on the system.
11612 *
11613 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
11614 * the number of cpus on the same numa node as this adapter. The vectors are
11615 * allocated without requesting OS affinity mapping. A vector will be
11616 * allocated and assigned to each online and offline cpu. If the cpu is
11617 * online, then affinity will be set to that cpu. If the cpu is offline, then
11618 * affinity will be set to the nearest peer cpu within the numa node that is
11619 * online. If there are no online cpus within the numa node, affinity is not
11620 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
11621 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
11622 * configured.
11623 *
11624 * If numa mode is not enabled and there is more than 1 vector allocated, then
 11625	 * the driver relies on the managed irq interface, where the OS assigns
 11626	 * vector-to-cpu affinity. The driver will then use that affinity mapping to set up its
11627 * cpu mapping table.
James Smartda0436e2009-05-22 14:51:39 -040011628 *
11629 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020011630 * 0 - successful
James Smartda0436e2009-05-22 14:51:39 -040011631 * other values - error
11632 **/
11633static int
11634lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11635{
James Smart75baf692010-06-08 18:31:21 -040011636 int vectors, rc, index;
James Smartb83d0052017-06-01 21:07:05 -070011637 char *name;
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011638 const struct cpumask *aff_mask = NULL;
James Smartdcaa2132019-11-04 16:57:06 -080011639 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
Dick Kennedy17105d92020-07-06 13:42:30 -070011640 struct lpfc_vector_map_info *cpup;
James Smartdcaa2132019-11-04 16:57:06 -080011641 struct lpfc_hba_eq_hdl *eqhdl;
11642 const struct cpumask *maskp;
James Smartdcaa2132019-11-04 16:57:06 -080011643 unsigned int flags = PCI_IRQ_MSIX;
James Smartda0436e2009-05-22 14:51:39 -040011644
11645 /* Set up MSI-X multi-message vectors */
James Smart6a828b02019-01-28 11:14:31 -080011646 vectors = phba->cfg_irq_chann;
Christoph Hellwig45ffac12017-02-12 13:52:26 -080011647
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011648 if (phba->irq_chann_mode != NORMAL_MODE)
11649 aff_mask = &phba->sli4_hba.irq_aff_mask;
11650
11651 if (aff_mask) {
11652 cpu_cnt = cpumask_weight(aff_mask);
James Smartdcaa2132019-11-04 16:57:06 -080011653 vectors = min(phba->cfg_irq_chann, cpu_cnt);
11654
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011655 /* cpu: iterates over aff_mask including offline or online
11656 * cpu_select: iterates over online aff_mask to set affinity
James Smartdcaa2132019-11-04 16:57:06 -080011657 */
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011658 cpu = cpumask_first(aff_mask);
11659 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
James Smartdcaa2132019-11-04 16:57:06 -080011660 } else {
11661 flags |= PCI_IRQ_AFFINITY;
11662 }
11663
11664 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
Alexander Gordeev4f871e12014-09-03 12:56:29 -040011665 if (rc < 0) {
James Smartda0436e2009-05-22 14:51:39 -040011666 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11667 "0484 PCI enable MSI-X failed (%d)\n", rc);
Alexander Gordeev029165a2014-07-16 20:05:15 +020011668 goto vec_fail_out;
James Smartda0436e2009-05-22 14:51:39 -040011669 }
Alexander Gordeev4f871e12014-09-03 12:56:29 -040011670 vectors = rc;
James Smart75baf692010-06-08 18:31:21 -040011671
James Smart7bb03bb2013-04-17 20:19:16 -040011672 /* Assign MSI-X vectors to interrupt handlers */
James Smart67d12732012-08-03 12:36:13 -040011673 for (index = 0; index < vectors; index++) {
James Smartdcaa2132019-11-04 16:57:06 -080011674 eqhdl = lpfc_get_eq_hdl(index);
11675 name = eqhdl->handler_name;
James Smartb83d0052017-06-01 21:07:05 -070011676 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11677 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
James Smart4305f182012-08-03 12:36:33 -040011678 LPFC_DRIVER_HANDLER_NAME"%d", index);
James Smartda0436e2009-05-22 14:51:39 -040011679
James Smartdcaa2132019-11-04 16:57:06 -080011680 eqhdl->idx = index;
James Smart7370d102019-01-28 11:14:20 -080011681 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11682 &lpfc_sli4_hba_intr_handler, 0,
James Smartdcaa2132019-11-04 16:57:06 -080011683 name, eqhdl);
James Smartda0436e2009-05-22 14:51:39 -040011684 if (rc) {
11685 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11686 "0486 MSI-X fast-path (%d) "
11687 "request_irq failed (%d)\n", index, rc);
11688 goto cfg_fail_out;
11689 }
James Smartdcaa2132019-11-04 16:57:06 -080011690
11691 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11692
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011693 if (aff_mask) {
James Smartdcaa2132019-11-04 16:57:06 -080011694 /* If found a neighboring online cpu, set affinity */
11695 if (cpu_select < nr_cpu_ids)
11696 lpfc_irq_set_aff(eqhdl, cpu_select);
11697
11698 /* Assign EQ to cpu_map */
11699 lpfc_assign_eq_map_info(phba, index,
11700 LPFC_CPU_FIRST_IRQ,
11701 cpu);
11702
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011703 /* Iterate to next offline or online cpu in aff_mask */
11704 cpu = cpumask_next(cpu, aff_mask);
James Smartdcaa2132019-11-04 16:57:06 -080011705
Dick Kennedy3048e3e2020-05-01 14:43:06 -070011706 /* Find next online cpu in aff_mask to set affinity */
11707 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
James Smartdcaa2132019-11-04 16:57:06 -080011708 } else if (vectors == 1) {
11709 cpu = cpumask_first(cpu_present_mask);
11710 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11711 cpu);
11712 } else {
11713 maskp = pci_irq_get_affinity(phba->pcidev, index);
11714
James Smartdcaa2132019-11-04 16:57:06 -080011715 /* Loop through all CPUs associated with vector index */
11716 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
Dick Kennedy17105d92020-07-06 13:42:30 -070011717 cpup = &phba->sli4_hba.cpu_map[cpu];
11718
James Smartdcaa2132019-11-04 16:57:06 -080011719			/* If this is the first CPU that's assigned to
11720 * this vector, set LPFC_CPU_FIRST_IRQ.
Dick Kennedy17105d92020-07-06 13:42:30 -070011721 *
 11722			 * With certain platforms it's possible that irq
 11723			 * vectors are affinitized to all the cpus.
 11724			 * This can result in each cpu_map.eq being set
11725 * to the last vector, resulting in overwrite
11726 * of all the previous cpu_map.eq. Ensure that
11727 * each vector receives a place in cpu_map.
11728 * Later call to lpfc_cpu_affinity_check will
11729 * ensure we are nicely balanced out.
James Smartdcaa2132019-11-04 16:57:06 -080011730 */
Dick Kennedy17105d92020-07-06 13:42:30 -070011731 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
11732 continue;
James Smartdcaa2132019-11-04 16:57:06 -080011733 lpfc_assign_eq_map_info(phba, index,
Dick Kennedy17105d92020-07-06 13:42:30 -070011734 LPFC_CPU_FIRST_IRQ,
James Smartdcaa2132019-11-04 16:57:06 -080011735 cpu);
Dick Kennedy17105d92020-07-06 13:42:30 -070011736 break;
James Smartdcaa2132019-11-04 16:57:06 -080011737 }
11738 }
James Smartda0436e2009-05-22 14:51:39 -040011739 }
11740
James Smart6a828b02019-01-28 11:14:31 -080011741 if (vectors != phba->cfg_irq_chann) {
Dick Kennedy372c1872020-06-30 14:50:00 -070011742 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart82c3e9b2012-09-29 11:29:50 -040011743 "3238 Reducing IO channels to match number of "
11744 "MSI-X vectors, requested %d got %d\n",
James Smart6a828b02019-01-28 11:14:31 -080011745 phba->cfg_irq_chann, vectors);
11746 if (phba->cfg_irq_chann > vectors)
11747 phba->cfg_irq_chann = vectors;
James Smart82c3e9b2012-09-29 11:29:50 -040011748 }
James Smart7bb03bb2013-04-17 20:19:16 -040011749
James Smartda0436e2009-05-22 14:51:39 -040011750 return rc;
11751
11752cfg_fail_out:
11753 /* free the irq already requested */
James Smartdcaa2132019-11-04 16:57:06 -080011754 for (--index; index >= 0; index--) {
11755 eqhdl = lpfc_get_eq_hdl(index);
11756 lpfc_irq_clear_aff(eqhdl);
11757 irq_set_affinity_hint(eqhdl->irq, NULL);
11758 free_irq(eqhdl->irq, eqhdl);
11759 }
James Smartda0436e2009-05-22 14:51:39 -040011760
James Smartda0436e2009-05-22 14:51:39 -040011761 /* Unconfigure MSI-X capability structure */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080011762 pci_free_irq_vectors(phba->pcidev);
Alexander Gordeev029165a2014-07-16 20:05:15 +020011763
11764vec_fail_out:
James Smartda0436e2009-05-22 14:51:39 -040011765 return rc;
11766}
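
/* Editor's sketch (not driver code): the allocate/request/unwind shape of the
 * routine above, stripped of lpfc specifics. The handler, the per-vector
 * cookie array, and the "demo-msix" name are assumptions for illustration;
 * the real routine also manages affinity hints and the cpu_map.
 */
static int demo_msix_request_all(struct pci_dev *pdev, int want,
				 irq_handler_t handler, void **cookies)
{
	int got, i, rc;

	got = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
	if (got < 0)
		return got;	/* MSI-X could not be enabled at all */

	for (i = 0; i < got; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "demo-msix", cookies[i]);
		if (rc)
			goto unwind;
	}
	return got;	/* may be fewer than 'want'; caller must adapt */

unwind:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), cookies[i]);
	pci_free_irq_vectors(pdev);
	return rc;
}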
11767
11768/**
James Smartda0436e2009-05-22 14:51:39 -040011769 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11770 * @phba: pointer to lpfc hba data structure.
11771 *
11772 * This routine is invoked to enable the MSI interrupt mode to device with
James Smart07b1b912019-08-14 16:56:58 -070011773 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
11774 * called to enable the MSI vector. The device driver is responsible for
 11775	 * calling request_irq() to register the MSI vector with an interrupt
 11776	 * handler, which is done in this function.
James Smartda0436e2009-05-22 14:51:39 -040011777 *
11778 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020011779 * 0 - successful
James Smartda0436e2009-05-22 14:51:39 -040011780 * other values - error
11781 **/
11782static int
11783lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11784{
11785 int rc, index;
James Smartdcaa2132019-11-04 16:57:06 -080011786 unsigned int cpu;
11787 struct lpfc_hba_eq_hdl *eqhdl;
James Smartda0436e2009-05-22 14:51:39 -040011788
James Smart07b1b912019-08-14 16:56:58 -070011789 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11790 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11791 if (rc > 0)
James Smartda0436e2009-05-22 14:51:39 -040011792 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11793 "0487 PCI enable MSI mode success.\n");
11794 else {
11795 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11796 "0488 PCI enable MSI mode failed (%d)\n", rc);
James Smart07b1b912019-08-14 16:56:58 -070011797 return rc ? rc : -1;
James Smartda0436e2009-05-22 14:51:39 -040011798 }
11799
11800 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
James Smarted243d32015-05-21 13:55:25 -040011801 0, LPFC_DRIVER_NAME, phba);
James Smartda0436e2009-05-22 14:51:39 -040011802 if (rc) {
James Smart07b1b912019-08-14 16:56:58 -070011803 pci_free_irq_vectors(phba->pcidev);
James Smartda0436e2009-05-22 14:51:39 -040011804 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11805 "0490 MSI request_irq failed (%d)\n", rc);
James Smart75baf692010-06-08 18:31:21 -040011806 return rc;
James Smartda0436e2009-05-22 14:51:39 -040011807 }
11808
James Smartdcaa2132019-11-04 16:57:06 -080011809 eqhdl = lpfc_get_eq_hdl(0);
11810 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11811
11812 cpu = cpumask_first(cpu_present_mask);
11813 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11814
James Smart6a828b02019-01-28 11:14:31 -080011815 for (index = 0; index < phba->cfg_irq_chann; index++) {
James Smartdcaa2132019-11-04 16:57:06 -080011816 eqhdl = lpfc_get_eq_hdl(index);
11817 eqhdl->idx = index;
James Smartda0436e2009-05-22 14:51:39 -040011818 }
11819
James Smart75baf692010-06-08 18:31:21 -040011820 return 0;
James Smartda0436e2009-05-22 14:51:39 -040011821}
11822
11823/**
James Smartda0436e2009-05-22 14:51:39 -040011824 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11825 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +010011826 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
James Smartda0436e2009-05-22 14:51:39 -040011827 *
11828 * This routine is invoked to enable device interrupt and associate driver's
11829 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 11830	 * interface spec. Depending on the interrupt mode configured in the driver,
 11831	 * the driver will try to fall back from the configured interrupt mode to an
11832 * interrupt mode which is supported by the platform, kernel, and device in
11833 * the order of:
11834 * MSI-X -> MSI -> IRQ.
11835 *
11836 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020011837 * 0 - successful
James Smartda0436e2009-05-22 14:51:39 -040011838 * other values - error
11839 **/
11840static uint32_t
11841lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11842{
11843 uint32_t intr_mode = LPFC_INTR_ERROR;
James Smart895427b2017-02-12 13:52:30 -080011844 int retval, idx;
James Smartda0436e2009-05-22 14:51:39 -040011845
11846 if (cfg_mode == 2) {
11847 /* Preparation before conf_msi mbox cmd */
11848 retval = 0;
11849 if (!retval) {
11850 /* Now, try to enable MSI-X interrupt mode */
11851 retval = lpfc_sli4_enable_msix(phba);
11852 if (!retval) {
11853 /* Indicate initialization to MSI-X mode */
11854 phba->intr_type = MSIX;
11855 intr_mode = 2;
11856 }
11857 }
11858 }
11859
11860 /* Fallback to MSI if MSI-X initialization failed */
11861 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11862 retval = lpfc_sli4_enable_msi(phba);
11863 if (!retval) {
11864 /* Indicate initialization to MSI mode */
11865 phba->intr_type = MSI;
11866 intr_mode = 1;
11867 }
11868 }
11869
 11870	/* Fallback to INTx if both MSI-X/MSI initialization failed */
11871 if (phba->intr_type == NONE) {
11872 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11873 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11874 if (!retval) {
James Smart895427b2017-02-12 13:52:30 -080011875 struct lpfc_hba_eq_hdl *eqhdl;
James Smartdcaa2132019-11-04 16:57:06 -080011876 unsigned int cpu;
James Smart895427b2017-02-12 13:52:30 -080011877
James Smartda0436e2009-05-22 14:51:39 -040011878 /* Indicate initialization to INTx mode */
11879 phba->intr_type = INTx;
11880 intr_mode = 0;
James Smart895427b2017-02-12 13:52:30 -080011881
James Smartdcaa2132019-11-04 16:57:06 -080011882 eqhdl = lpfc_get_eq_hdl(0);
11883 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11884
11885 cpu = cpumask_first(cpu_present_mask);
11886 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11887 cpu);
James Smart6a828b02019-01-28 11:14:31 -080011888 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
James Smartdcaa2132019-11-04 16:57:06 -080011889 eqhdl = lpfc_get_eq_hdl(idx);
James Smart895427b2017-02-12 13:52:30 -080011890 eqhdl->idx = idx;
James Smart1ba981f2014-02-20 09:56:45 -050011891 }
James Smartda0436e2009-05-22 14:51:39 -040011892 }
11893 }
11894 return intr_mode;
11895}
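
/* Editor's sketch (illustration only): the MSI-X -> MSI -> INTx ladder above,
 * reduced to its control flow. The callbacks are hypothetical; the numeric
 * return convention (2/1/0, LPFC_INTR_ERROR on total failure) matches the
 * routine above.
 */
static uint32_t demo_intr_mode_ladder(uint32_t cfg_mode,
				      int (*try_msix)(void),
				      int (*try_msi)(void),
				      int (*try_intx)(void))
{
	if (cfg_mode == 2 && !try_msix())
		return 2;	/* MSI-X */
	if (cfg_mode >= 1 && !try_msi())
		return 1;	/* MSI */
	if (!try_intx())
		return 0;	/* INTx */
	return LPFC_INTR_ERROR;
}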
11896
11897/**
11898 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11899 * @phba: pointer to lpfc hba data structure.
11900 *
11901 * This routine is invoked to disable device interrupt and disassociate
11902 * the driver's interrupt handler(s) from interrupt vector(s) to device
11903 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11904 * will release the interrupt vector(s) for the message signaled interrupt.
11905 **/
11906static void
11907lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11908{
11909 /* Disable the currently initialized interrupt mode */
Christoph Hellwig45ffac12017-02-12 13:52:26 -080011910 if (phba->intr_type == MSIX) {
11911 int index;
James Smartdcaa2132019-11-04 16:57:06 -080011912 struct lpfc_hba_eq_hdl *eqhdl;
Christoph Hellwig45ffac12017-02-12 13:52:26 -080011913
11914 /* Free up MSI-X multi-message vectors */
James Smart6a828b02019-01-28 11:14:31 -080011915 for (index = 0; index < phba->cfg_irq_chann; index++) {
James Smartdcaa2132019-11-04 16:57:06 -080011916 eqhdl = lpfc_get_eq_hdl(index);
11917 lpfc_irq_clear_aff(eqhdl);
11918 irq_set_affinity_hint(eqhdl->irq, NULL);
11919 free_irq(eqhdl->irq, eqhdl);
James Smartb3295c22019-01-28 11:14:30 -080011920 }
Christoph Hellwig45ffac12017-02-12 13:52:26 -080011921 } else {
James Smartda0436e2009-05-22 14:51:39 -040011922 free_irq(phba->pcidev->irq, phba);
Christoph Hellwig45ffac12017-02-12 13:52:26 -080011923 }
11924
11925 pci_free_irq_vectors(phba->pcidev);
James Smartda0436e2009-05-22 14:51:39 -040011926
11927 /* Reset interrupt management states */
11928 phba->intr_type = NONE;
11929 phba->sli.slistat.sli_intr = 0;
James Smartda0436e2009-05-22 14:51:39 -040011930}
11931
11932/**
James Smart3772a992009-05-22 14:50:54 -040011933 * lpfc_unset_hba - Unset SLI3 hba device initialization
11934 * @phba: pointer to lpfc hba data structure.
11935 *
11936 * This routine is invoked to unset the HBA device initialization steps to
11937 * a device with SLI-3 interface spec.
11938 **/
11939static void
11940lpfc_unset_hba(struct lpfc_hba *phba)
11941{
11942 struct lpfc_vport *vport = phba->pport;
11943 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11944
11945 spin_lock_irq(shost->host_lock);
11946 vport->load_flag |= FC_UNLOADING;
11947 spin_unlock_irq(shost->host_lock);
11948
James Smart72859902012-01-18 16:25:38 -050011949 kfree(phba->vpi_bmask);
11950 kfree(phba->vpi_ids);
11951
James Smart3772a992009-05-22 14:50:54 -040011952 lpfc_stop_hba_timers(phba);
11953
11954 phba->pport->work_port_events = 0;
11955
11956 lpfc_sli_hba_down(phba);
11957
11958 lpfc_sli_brdrestart(phba);
11959
11960 lpfc_sli_disable_intr(phba);
11961
11962 return;
11963}
11964
11965/**
James Smart5af5eee2010-10-22 11:06:38 -040011966 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11967 * @phba: Pointer to HBA context object.
11968 *
11969 * This function is called in the SLI4 code path to wait for completion
11970 * of device's XRIs exchange busy. It will check the XRI exchange busy
11971 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
11972 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 11973	 * I/Os every 30 seconds, log an error message, and wait forever. Only when
11974 * all XRI exchange busy complete, the driver unload shall proceed with
11975 * invoking the function reset ioctl mailbox command to the CNA and the
 11976	 * rest of the driver unload resource release.
11977 **/
11978static void
11979lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11980{
James Smart5e5b5112019-01-28 11:14:22 -080011981 struct lpfc_sli4_hdw_queue *qp;
James Smartc00f62e2019-08-14 16:57:11 -070011982 int idx, ccnt;
James Smart5af5eee2010-10-22 11:06:38 -040011983 int wait_time = 0;
James Smart5e5b5112019-01-28 11:14:22 -080011984 int io_xri_cmpl = 1;
James Smart86c67372017-04-21 16:05:04 -070011985 int nvmet_xri_cmpl = 1;
James Smart5af5eee2010-10-22 11:06:38 -040011986 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11987
James Smartc3725bd2017-11-20 16:00:42 -080011988 /* Driver just aborted IOs during the hba_unset process. Pause
11989 * here to give the HBA time to complete the IO and get entries
11990 * into the abts lists.
11991 */
11992 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11993
11994 /* Wait for NVME pending IO to flush back to transport. */
11995 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11996 lpfc_nvme_wait_for_io_drain(phba);
11997
James Smart5e5b5112019-01-28 11:14:22 -080011998 ccnt = 0;
James Smart5e5b5112019-01-28 11:14:22 -080011999 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
12000 qp = &phba->sli4_hba.hdwq[idx];
James Smartc00f62e2019-08-14 16:57:11 -070012001 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
12002 if (!io_xri_cmpl) /* if list is NOT empty */
12003 ccnt++;
James Smart5e5b5112019-01-28 11:14:22 -080012004 }
12005 if (ccnt)
12006 io_xri_cmpl = 0;
James Smart5e5b5112019-01-28 11:14:22 -080012007
James Smart86c67372017-04-21 16:05:04 -070012008 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
James Smart86c67372017-04-21 16:05:04 -070012009 nvmet_xri_cmpl =
12010 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12011 }
James Smart895427b2017-02-12 13:52:30 -080012012
James Smartc00f62e2019-08-14 16:57:11 -070012013 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
James Smart5af5eee2010-10-22 11:06:38 -040012014 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
James Smart68c9b552018-06-26 08:24:25 -070012015 if (!nvmet_xri_cmpl)
Dick Kennedy372c1872020-06-30 14:50:00 -070012016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart68c9b552018-06-26 08:24:25 -070012017 "6424 NVMET XRI exchange busy "
12018 "wait time: %d seconds.\n",
12019 wait_time/1000);
James Smart5e5b5112019-01-28 11:14:22 -080012020 if (!io_xri_cmpl)
Dick Kennedy372c1872020-06-30 14:50:00 -070012021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartc00f62e2019-08-14 16:57:11 -070012022 "6100 IO XRI exchange busy "
James Smart5af5eee2010-10-22 11:06:38 -040012023 "wait time: %d seconds.\n",
12024 wait_time/1000);
12025 if (!els_xri_cmpl)
Dick Kennedy372c1872020-06-30 14:50:00 -070012026 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart5af5eee2010-10-22 11:06:38 -040012027 "2878 ELS XRI exchange busy "
12028 "wait time: %d seconds.\n",
12029 wait_time/1000);
12030 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
12031 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
12032 } else {
12033 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
12034 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
12035 }
James Smart5e5b5112019-01-28 11:14:22 -080012036
12037 ccnt = 0;
James Smart5e5b5112019-01-28 11:14:22 -080012038 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
12039 qp = &phba->sli4_hba.hdwq[idx];
James Smartc00f62e2019-08-14 16:57:11 -070012040 io_xri_cmpl = list_empty(
12041 &qp->lpfc_abts_io_buf_list);
12042 if (!io_xri_cmpl) /* if list is NOT empty */
12043 ccnt++;
James Smart5e5b5112019-01-28 11:14:22 -080012044 }
12045 if (ccnt)
12046 io_xri_cmpl = 0;
James Smart5e5b5112019-01-28 11:14:22 -080012047
James Smart86c67372017-04-21 16:05:04 -070012048 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
James Smart86c67372017-04-21 16:05:04 -070012049 nvmet_xri_cmpl = list_empty(
12050 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12051 }
James Smart5af5eee2010-10-22 11:06:38 -040012052 els_xri_cmpl =
12053 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
James Smartf358dd02017-02-12 13:52:34 -080012054
James Smart5af5eee2010-10-22 11:06:38 -040012055 }
12056}
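
/* Editor's sketch (not driver code): the wait loop above is a two-phase poll;
 * fast polling until LPFC_XRI_EXCH_BUSY_WAIT_TMO is exceeded, then a slow
 * cadence with a warning on every pass until the condition clears. "cond" is
 * a hypothetical callback; the real routine rechecks several abort lists.
 */
static void demo_two_phase_wait(bool (*cond)(void *), void *arg)
{
	int wait_time = 0;

	while (!cond(arg)) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			pr_warn("demo: still waiting after %d seconds\n",
				wait_time / 1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}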
12057
12058/**
James Smartda0436e2009-05-22 14:51:39 -040012059 * lpfc_sli4_hba_unset - Unset the fcoe hba
12060 * @phba: Pointer to HBA context object.
12061 *
12062 * This function is called in the SLI4 code path to reset the HBA's FCoE
12063 * function. The caller is not required to hold any lock. This routine
12064 * issues PCI function reset mailbox command to reset the FCoE function.
12065 * At the end of the function, it calls lpfc_hba_down_post function to
12066 * free any pending commands.
12067 **/
12068static void
12069lpfc_sli4_hba_unset(struct lpfc_hba *phba)
12070{
12071 int wait_cnt = 0;
12072 LPFC_MBOXQ_t *mboxq;
James Smart912e3ac2011-05-24 11:42:11 -040012073 struct pci_dev *pdev = phba->pcidev;
James Smartda0436e2009-05-22 14:51:39 -040012074
12075 lpfc_stop_hba_timers(phba);
James Smartcdb42be2019-01-28 11:14:21 -080012076 if (phba->pport)
12077 phba->sli4_hba.intr_enable = 0;
James Smartda0436e2009-05-22 14:51:39 -040012078
12079 /*
12080 * Gracefully wait out the potential current outstanding asynchronous
12081 * mailbox command.
12082 */
12083
12084 /* First, block any pending async mailbox command from posted */
12085 spin_lock_irq(&phba->hbalock);
12086 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12087 spin_unlock_irq(&phba->hbalock);
12088 /* Now, trying to wait it out if we can */
12089 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12090 msleep(10);
12091 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
12092 break;
12093 }
12094 /* Forcefully release the outstanding mailbox command if timed out */
12095 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12096 spin_lock_irq(&phba->hbalock);
12097 mboxq = phba->sli.mbox_active;
12098 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
12099 __lpfc_mbox_cmpl_put(phba, mboxq);
12100 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12101 phba->sli.mbox_active = NULL;
12102 spin_unlock_irq(&phba->hbalock);
12103 }
12104
James Smart5af5eee2010-10-22 11:06:38 -040012105 /* Abort all iocbs associated with the hba */
12106 lpfc_sli_hba_iocb_abort(phba);
12107
12108 /* Wait for completion of device XRI exchange busy */
12109 lpfc_sli4_xri_exchange_busy_wait(phba);
12110
James Smart93a4d6f2019-11-04 16:57:05 -080012111 /* per-phba callback de-registration for hotplug event */
SeongJae Park46da5472020-06-23 10:41:22 +020012112 if (phba->pport)
12113 lpfc_cpuhp_remove(phba);
James Smart93a4d6f2019-11-04 16:57:05 -080012114
James Smartda0436e2009-05-22 14:51:39 -040012115 /* Disable PCI subsystem interrupt */
12116 lpfc_sli4_disable_intr(phba);
12117
James Smart912e3ac2011-05-24 11:42:11 -040012118 /* Disable SR-IOV if enabled */
12119 if (phba->cfg_sriov_nr_virtfn)
12120 pci_disable_sriov(pdev);
12121
James Smartda0436e2009-05-22 14:51:39 -040012122 /* Stop kthread signal shall trigger work_done one more time */
12123 kthread_stop(phba->worker_thread);
12124
James Smartd2cc9bc2018-09-10 10:30:50 -070012125 /* Disable FW logging to host memory */
James Smart1165a5c2018-11-29 16:09:39 -080012126 lpfc_ras_stop_fwlog(phba);
James Smartd2cc9bc2018-09-10 10:30:50 -070012127
James Smartd1f525a2017-04-21 16:04:55 -070012128 /* Unset the queues shared with the hardware then release all
12129 * allocated resources.
12130 */
12131 lpfc_sli4_queue_unset(phba);
12132 lpfc_sli4_queue_destroy(phba);
12133
James Smart3677a3a2010-09-29 11:19:14 -040012134 /* Reset SLI4 HBA FCoE function */
12135 lpfc_pci_function_reset(phba);
12136
James Smart1165a5c2018-11-29 16:09:39 -080012137 /* Free RAS DMA memory */
12138 if (phba->ras_fwlog.ras_enabled)
12139 lpfc_sli4_ras_dma_free(phba);
12140
James Smartda0436e2009-05-22 14:51:39 -040012141 /* Stop the SLI4 device port */
James Smart1ffdd2c2019-03-04 15:27:51 -080012142 if (phba->pport)
12143 phba->pport->work_port_events = 0;
James Smartda0436e2009-05-22 14:51:39 -040012144}
12145
12146/**
James Smartfedd3b72011-02-16 12:39:24 -050012147 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
12148 * @phba: Pointer to HBA context object.
12149 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
12150 *
12151 * This function is called in the SLI4 code path to read the port's
12152 * sli4 capabilities.
12153 *
 12154	 * This function may be called from any context that can block-wait
12155 * for the completion. The expectation is that this routine is called
12156 * typically from probe_one or from the online routine.
12157 **/
12158int
12159lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12160{
12161 int rc;
12162 struct lpfc_mqe *mqe = &mboxq->u.mqe;
12163 struct lpfc_pc_sli4_params *sli4_params;
James Smarta183a152011-10-10 21:32:43 -040012164 uint32_t mbox_tmo;
James Smartfedd3b72011-02-16 12:39:24 -050012165 int length;
James Smartbf316c72018-04-09 14:24:28 -070012166 bool exp_wqcq_pages = true;
James Smartfedd3b72011-02-16 12:39:24 -050012167 struct lpfc_sli4_parameters *mbx_sli4_parameters;
12168
James Smart6d368e52011-05-24 11:44:12 -040012169 /*
12170 * By default, the driver assumes the SLI4 port requires RPI
12171 * header postings. The SLI4_PARAM response will correct this
12172 * assumption.
12173 */
12174 phba->sli4_hba.rpi_hdrs_in_use = 1;
12175
James Smartfedd3b72011-02-16 12:39:24 -050012176 /* Read the port's SLI4 Config Parameters */
12177 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12178 sizeof(struct lpfc_sli4_cfg_mhdr));
12179 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12180 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12181 length, LPFC_SLI4_MBX_EMBED);
12182 if (!phba->sli4_hba.intr_enable)
12183 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
James Smarta183a152011-10-10 21:32:43 -040012184 else {
12185 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12186 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12187 }
James Smartfedd3b72011-02-16 12:39:24 -050012188 if (unlikely(rc))
12189 return rc;
12190 sli4_params = &phba->sli4_hba.pc_sli4_params;
12191 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12192 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12193 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12194 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12195 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12196 mbx_sli4_parameters);
12197 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12198 mbx_sli4_parameters);
12199 if (bf_get(cfg_phwq, mbx_sli4_parameters))
12200 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12201 else
12202 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12203 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
James Smartb62232b2021-04-11 18:31:22 -070012204 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
12205 mbx_sli4_parameters);
James Smart1ba981f2014-02-20 09:56:45 -050012206 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
James Smartfedd3b72011-02-16 12:39:24 -050012207 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12208 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12209 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12210 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
James Smart7365f6f2018-02-22 08:18:46 -080012211 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12212 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
James Smart0c651872013-07-15 18:33:23 -040012213 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
James Smart66e9e6b2018-06-26 08:24:27 -070012214 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
James Smart83c6cb12019-10-18 14:18:30 -070012215 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
James Smartfedd3b72011-02-16 12:39:24 -050012216 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12217 mbx_sli4_parameters);
James Smart895427b2017-02-12 13:52:30 -080012218 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
James Smartfedd3b72011-02-16 12:39:24 -050012219 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12220 mbx_sli4_parameters);
James Smart6d368e52011-05-24 11:44:12 -040012221 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12222 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
James Smart895427b2017-02-12 13:52:30 -080012223
James Smartd79c9e92019-08-14 16:57:09 -070012224 /* Check for Extended Pre-Registered SGL support */
12225 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12226
James Smartc15e0702019-05-21 17:49:02 -070012227 /* Check for firmware nvme support */
12228 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12229 bf_get(cfg_xib, mbx_sli4_parameters));
James Smart895427b2017-02-12 13:52:30 -080012230
James Smartc15e0702019-05-21 17:49:02 -070012231 if (rc) {
12232 /* Save this to indicate the Firmware supports NVME */
12233 sli4_params->nvme = 1;
12234
12235 /* Firmware NVME support, check driver FC4 NVME support */
12236 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12238 "6133 Disabling NVME support: "
12239 "FC4 type not supported: x%x\n",
12240 phba->cfg_enable_fc4_type);
12241 goto fcponly;
12242 }
12243 } else {
12244 /* No firmware NVME support, check driver FC4 NVME support */
12245 sli4_params->nvme = 0;
12246 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12248 "6101 Disabling NVME support: Not "
12249 "supported by firmware (%d %d) x%x\n",
12250 bf_get(cfg_nvme, mbx_sli4_parameters),
12251 bf_get(cfg_xib, mbx_sli4_parameters),
12252 phba->cfg_enable_fc4_type);
12253fcponly:
James Smartc15e0702019-05-21 17:49:02 -070012254 phba->nvmet_support = 0;
12255 phba->cfg_nvmet_mrq = 0;
James Smart6a224b42019-08-14 16:56:57 -070012256 phba->cfg_nvme_seg_cnt = 0;
James Smartc15e0702019-05-21 17:49:02 -070012257
12258 /* If no FC4 type support, move to just SCSI support */
12259 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12260 return -ENODEV;
12261 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12262 }
James Smart895427b2017-02-12 13:52:30 -080012263 }
James Smart05580562011-05-24 11:40:48 -040012264
James Smartc26c2652019-08-14 16:56:49 -070012265 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
James Smarta5f73372019-09-21 20:58:50 -070012266 * accommodate 512K and 1M IOs in a single nvme buf.
James Smartc26c2652019-08-14 16:56:49 -070012267 */
James Smarta5f73372019-09-21 20:58:50 -070012268 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
James Smartc26c2652019-08-14 16:56:49 -070012269 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
James Smartc26c2652019-08-14 16:56:49 -070012270
James Smart414abe02018-06-26 08:24:26 -070012271 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
12272 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
12273 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
12274 phba->cfg_enable_pbde = 0;
James Smart0bc2b7c2018-02-22 08:18:48 -080012275
James Smart20aefac2018-01-30 15:58:58 -080012276 /*
12277 * To support Suppress Response feature we must satisfy 3 conditions.
12278 * lpfc_suppress_rsp module parameter must be set (default).
12279 * In SLI4-Parameters Descriptor:
12280 * Extended Inline Buffers (XIB) must be supported.
12281 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
12282 * (double negative).
12283 */
12284 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12285 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
James Smartf358dd02017-02-12 13:52:34 -080012286 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
James Smart20aefac2018-01-30 15:58:58 -080012287 else
12288 phba->cfg_suppress_rsp = 0;
James Smartf358dd02017-02-12 13:52:34 -080012289
James Smart0cf07f842017-06-01 21:07:10 -070012290 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12291 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
12292
James Smart05580562011-05-24 11:40:48 -040012293 /* Make sure that sge_supp_len can be handled by the driver */
12294 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12295 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12296
James Smartb5c53952016-03-31 14:12:30 -070012297 /*
James Smartc176ffa2018-01-30 15:58:46 -080012298 * Check whether the adapter supports an embedded copy of the
12299 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
12300 * to use this option, 128-byte WQEs must be used.
James Smartb5c53952016-03-31 14:12:30 -070012301 */
12302 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12303 phba->fcp_embed_io = 1;
12304 else
12305 phba->fcp_embed_io = 0;
James Smart7bdedb32016-07-06 12:36:00 -070012306
James Smart0bc2b7c2018-02-22 08:18:48 -080012307 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
James Smart414abe02018-06-26 08:24:26 -070012308 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
James Smart0bc2b7c2018-02-22 08:18:48 -080012309 bf_get(cfg_xib, mbx_sli4_parameters),
James Smart414abe02018-06-26 08:24:26 -070012310 phba->cfg_enable_pbde,
James Smartae463b62021-07-07 11:43:32 -070012311 phba->fcp_embed_io, sli4_params->nvme,
James Smart4e565cf2018-02-22 08:18:50 -080012312 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
James Smart0bc2b7c2018-02-22 08:18:48 -080012313
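	/*
	 * Example decode of the "6422" message above, with assumed values
	 * (illustration only):
	 *
	 *	6422 XIB 1 PBDE 0: FCP 1 NVME 1 1 1
	 *
	 * reads as: XIB supported, PBDE embedding disabled, FCP CMD IU
	 * embedding enabled, firmware reports NVME, NVME command embedding
	 * configured, and Suppress Response active.
	 */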
James Smartbf316c72018-04-09 14:24:28 -070012314 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12315 LPFC_SLI_INTF_IF_TYPE_2) &&
12316 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
James Smartc2217682018-05-24 21:09:00 -070012317 LPFC_SLI_INTF_FAMILY_LNCR_A0))
James Smartbf316c72018-04-09 14:24:28 -070012318 exp_wqcq_pages = false;
12319
James Smartc176ffa2018-01-30 15:58:46 -080012320 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12321 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
James Smartbf316c72018-04-09 14:24:28 -070012322 exp_wqcq_pages &&
James Smartc176ffa2018-01-30 15:58:46 -080012323 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12324 phba->enab_exp_wqcq_pages = 1;
12325 else
12326 phba->enab_exp_wqcq_pages = 0;
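	/*
	 * Compact restatement of the condition above (informational):
	 * expanded (16K) WQ/CQ pages are enabled only when both the CQ and
	 * WQ report 16K page-size support, the port is not an if_type-2
	 * Lancer A0, and 128-byte WQEs are supported.
	 */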
James Smart7bdedb32016-07-06 12:36:00 -070012327 /*
12328 * Check if the SLI port supports MDS Diagnostics
12329 */
12330 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12331 phba->mds_diags_support = 1;
12332 else
12333 phba->mds_diags_support = 0;
James Smartd2cc9bc2018-09-10 10:30:50 -070012334
James Smart0d8af092019-08-14 16:57:10 -070012335 /*
12336 * Check if the SLI port supports NSLER
12337 */
12338 if (bf_get(cfg_nsler, mbx_sli4_parameters))
12339 phba->nsler = 1;
12340 else
12341 phba->nsler = 0;
12342
James Smart8aaa7bc2020-10-20 13:27:17 -070012343	/* Save MIB info for use during HBA setup */
12344 sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
12345 sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
12346 sli4_params->mib_size = mbx_sli4_parameters->mib_size;
12347 sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
12348
12349 /* Next we check for Vendor MIB support */
12350 if (sli4_params->mi_ver && phba->cfg_enable_mi)
12351 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
12352
12353 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12354 "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
12355 sli4_params->mi_ver, phba->cfg_enable_mi,
12356 sli4_params->mi_value, sli4_params->mib_bde_cnt,
12357 sli4_params->mib_size);
James Smartfedd3b72011-02-16 12:39:24 -050012358 return 0;
12359}
12360
12361/**
James Smart3772a992009-05-22 14:50:54 -040012362 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
12363 * @pdev: pointer to PCI device
12364 * @pid: pointer to PCI device identifier
12365 *
12366 * This routine is to be called to attach a device with SLI-3 interface spec
12367 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12368 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
12369 * information of the device and driver to see if the driver states that it can
12370 * support this kind of device. If the match is successful, the driver core
12371 * invokes this routine. If this routine determines it can claim the HBA, it
12372 * does all the initialization that it needs to do to handle the HBA properly.
12373 *
12374 * Return code
12375 * 0 - driver can claim the device
12376 * negative value - driver can not claim the device
12377 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080012378static int
James Smart3772a992009-05-22 14:50:54 -040012379lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
12380{
12381 struct lpfc_hba *phba;
12382 struct lpfc_vport *vport = NULL;
James Smart6669f9b2009-10-02 15:16:45 -040012383 struct Scsi_Host *shost = NULL;
James Smart3772a992009-05-22 14:50:54 -040012384 int error;
12385 uint32_t cfg_mode, intr_mode;
12386
12387 /* Allocate memory for HBA structure */
12388 phba = lpfc_hba_alloc(pdev);
12389 if (!phba)
12390 return -ENOMEM;
12391
12392 /* Perform generic PCI device enabling operation */
12393 error = lpfc_enable_pci_dev(phba);
James Smart079b5c92011-08-21 21:48:49 -040012394 if (error)
James Smart3772a992009-05-22 14:50:54 -040012395 goto out_free_phba;
James Smart3772a992009-05-22 14:50:54 -040012396
12397 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
12398 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
12399 if (error)
12400 goto out_disable_pci_dev;
12401
12402 /* Set up SLI-3 specific device PCI memory space */
12403 error = lpfc_sli_pci_mem_setup(phba);
12404 if (error) {
12405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12406 "1402 Failed to set up pci memory space.\n");
12407 goto out_disable_pci_dev;
12408 }
12409
James Smart3772a992009-05-22 14:50:54 -040012410 /* Set up SLI-3 specific device driver resources */
12411 error = lpfc_sli_driver_resource_setup(phba);
12412 if (error) {
12413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12414 "1404 Failed to set up driver resource.\n");
12415 goto out_unset_pci_mem_s3;
12416 }
12417
12418 /* Initialize and populate the iocb list per host */
James Smartd1f525a2017-04-21 16:04:55 -070012419
James Smart3772a992009-05-22 14:50:54 -040012420 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
12421 if (error) {
12422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12423 "1405 Failed to initialize iocb list.\n");
12424 goto out_unset_driver_resource_s3;
12425 }
12426
12427 /* Set up common device driver resources */
12428 error = lpfc_setup_driver_resource_phase2(phba);
12429 if (error) {
12430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12431 "1406 Failed to set up driver resource.\n");
12432 goto out_free_iocb_list;
12433 }
12434
James Smart079b5c92011-08-21 21:48:49 -040012435 /* Get the default values for Model Name and Description */
12436 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12437
James Smart3772a992009-05-22 14:50:54 -040012438 /* Create SCSI host to the physical port */
12439 error = lpfc_create_shost(phba);
12440 if (error) {
12441 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12442 "1407 Failed to create scsi host.\n");
12443 goto out_unset_driver_resource;
12444 }
12445
12446 /* Configure sysfs attributes */
12447 vport = phba->pport;
12448 error = lpfc_alloc_sysfs_attr(vport);
12449 if (error) {
12450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12451 "1476 Failed to allocate sysfs attr\n");
12452 goto out_destroy_shost;
12453 }
12454
James Smart6669f9b2009-10-02 15:16:45 -040012455 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
James Smart3772a992009-05-22 14:50:54 -040012456 /* Now, trying to enable interrupt and bring up the device */
12457 cfg_mode = phba->cfg_use_msi;
12458 while (true) {
12459 /* Put device to a known state before enabling interrupt */
12460 lpfc_stop_port(phba);
12461 /* Configure and enable interrupt */
12462 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
12463 if (intr_mode == LPFC_INTR_ERROR) {
Dick Kennedy372c1872020-06-30 14:50:00 -070012464 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040012465 "0431 Failed to enable interrupt.\n");
12466 error = -ENODEV;
12467 goto out_free_sysfs_attr;
12468 }
12469 /* SLI-3 HBA setup */
12470 if (lpfc_sli_hba_setup(phba)) {
Dick Kennedy372c1872020-06-30 14:50:00 -070012471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040012472 "1477 Failed to set up hba\n");
12473 error = -ENODEV;
12474 goto out_remove_device;
12475 }
12476
12477 /* Wait 50ms for the interrupts of previous mailbox commands */
12478 msleep(50);
12479 /* Check active interrupts on message signaled interrupts */
12480 if (intr_mode == 0 ||
12481 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12482 /* Log the current active interrupt mode */
12483 phba->intr_mode = intr_mode;
12484 lpfc_log_intr_mode(phba, intr_mode);
12485 break;
12486 } else {
12487 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12488 "0447 Configure interrupt mode (%d) "
12489 "failed active interrupt test.\n",
12490 intr_mode);
12491 /* Disable the current interrupt mode */
12492 lpfc_sli_disable_intr(phba);
12493 /* Try next level of interrupt mode */
12494 cfg_mode = --intr_mode;
12495 }
12496 }
12497
12498 /* Perform post initialization setup */
12499 lpfc_post_init_setup(phba);
12500
12501 /* Check if there are static vports to be created. */
12502 lpfc_create_static_vport(phba);
12503
12504 return 0;
12505
12506out_remove_device:
12507 lpfc_unset_hba(phba);
12508out_free_sysfs_attr:
12509 lpfc_free_sysfs_attr(vport);
12510out_destroy_shost:
12511 lpfc_destroy_shost(phba);
12512out_unset_driver_resource:
12513 lpfc_unset_driver_resource_phase2(phba);
12514out_free_iocb_list:
12515 lpfc_free_iocb_list(phba);
12516out_unset_driver_resource_s3:
12517 lpfc_sli_driver_resource_unset(phba);
12518out_unset_pci_mem_s3:
12519 lpfc_sli_pci_mem_unset(phba);
12520out_disable_pci_dev:
12521 lpfc_disable_pci_dev(phba);
James Smart6669f9b2009-10-02 15:16:45 -040012522 if (shost)
12523 scsi_host_put(shost);
James Smart3772a992009-05-22 14:50:54 -040012524out_free_phba:
12525 lpfc_hba_free(phba);
12526 return error;
12527}
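/*
 * The error labels above unwind strictly in reverse order of setup, so each
 * failure path releases only what has already been acquired.  For reference,
 * a minimal sketch (names assumed; the driver's actual registration lives
 * elsewhere in this file) of how such probe/remove entry points plug into
 * the PCI core:
 *
 *	static struct pci_driver lpfc_driver_sketch = {
 *		.name     = "lpfc",
 *		.id_table = lpfc_id_table,
 *		.probe    = lpfc_pci_probe_one,
 *		.remove   = lpfc_pci_remove_one,
 *	};
 *
 * where the generic lpfc_pci_probe_one()/lpfc_pci_remove_one() wrappers
 * dispatch to the _s3 or _s4 variant and the structure is registered with
 * pci_register_driver().
 */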
12528
12529/**
12530 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
James Smarte59058c2008-08-24 21:49:00 -040012531 * @pdev: pointer to PCI device
12532 *
James Smart3772a992009-05-22 14:50:54 -040012533 * This routine is to be called to detach a device with SLI-3 interface
12534 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12535 * removed from PCI bus, it performs all the necessary cleanup for the HBA
12536 * device to be removed from the PCI subsystem properly.
James Smarte59058c2008-08-24 21:49:00 -040012537 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080012538static void
James Smart3772a992009-05-22 14:50:54 -040012539lpfc_pci_remove_one_s3(struct pci_dev *pdev)
dea31012005-04-17 16:05:31 -050012540{
James Smart2e0fef82007-06-17 19:56:36 -050012541 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12542 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
James Smarteada2722008-12-04 22:39:13 -050012543 struct lpfc_vport **vports;
James Smart2e0fef82007-06-17 19:56:36 -050012544 struct lpfc_hba *phba = vport->phba;
James Smarteada2722008-12-04 22:39:13 -050012545 int i;
Tomohiro Kusumi8a4df1202008-01-11 01:53:00 -050012546
James Smart549e55c2007-08-02 11:09:51 -040012547 spin_lock_irq(&phba->hbalock);
James Smart51ef4c22007-08-02 11:10:31 -040012548 vport->load_flag |= FC_UNLOADING;
James Smart549e55c2007-08-02 11:09:51 -040012549 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050012550
James Smart858c9f62007-06-17 19:56:39 -050012551 lpfc_free_sysfs_attr(vport);
12552
James Smarteada2722008-12-04 22:39:13 -050012553 /* Release all the vports against this physical port */
12554 vports = lpfc_create_vport_work_array(phba);
12555 if (vports != NULL)
James Smart587a37f2012-05-09 21:16:03 -040012556 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12557 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12558 continue;
James Smarteada2722008-12-04 22:39:13 -050012559 fc_vport_terminate(vports[i]->fc_vport);
James Smart587a37f2012-05-09 21:16:03 -040012560 }
James Smarteada2722008-12-04 22:39:13 -050012561 lpfc_destroy_vport_work_array(phba, vports);
12562
James Smart95f0ef82020-11-15 11:26:32 -080012563 /* Remove FC host with the physical port */
James Smart858c9f62007-06-17 19:56:39 -050012564 fc_remove_host(shost);
James Smarte9b11082020-11-15 11:26:33 -080012565 scsi_remove_host(shost);
James Smartd613b6a2017-02-12 13:52:37 -080012566
James Smart95f0ef82020-11-15 11:26:32 -080012567 /* Clean up all nodes, mailboxes and IOs. */
James Smart87af33f2007-10-27 13:37:43 -040012568 lpfc_cleanup(vport);
12569
James Smart2e0fef82007-06-17 19:56:36 -050012570 /*
12571 * Bring down the SLI Layer. This step disables all interrupts,
12572 * clears the rings, discards all mailbox commands, and resets
12573 * the HBA.
12574 */
James Smarta257bf92009-04-06 18:48:10 -040012575
Justin P. Mattock48e34d02010-12-30 15:07:58 -080012576 /* HBA interrupt will be disabled after this call */
James Smart2e0fef82007-06-17 19:56:36 -050012577 lpfc_sli_hba_down(phba);
James Smarta257bf92009-04-06 18:48:10 -040012578	/* Stop kthread; the stop signal triggers work_done one more time */
12579 kthread_stop(phba->worker_thread);
12580 /* Final cleanup of txcmplq and reset the HBA */
James Smart2e0fef82007-06-17 19:56:36 -050012581 lpfc_sli_brdrestart(phba);
12582
James Smart72859902012-01-18 16:25:38 -050012583 kfree(phba->vpi_bmask);
12584 kfree(phba->vpi_ids);
12585
James Smart3772a992009-05-22 14:50:54 -040012586 lpfc_stop_hba_timers(phba);
James Smart523128e2018-09-10 10:30:46 -070012587 spin_lock_irq(&phba->port_list_lock);
James Smart858c9f62007-06-17 19:56:39 -050012588 list_del_init(&vport->listentry);
James Smart523128e2018-09-10 10:30:46 -070012589 spin_unlock_irq(&phba->port_list_lock);
James Smart858c9f62007-06-17 19:56:39 -050012590
James Smart858c9f62007-06-17 19:56:39 -050012591 lpfc_debugfs_terminate(vport);
James Smart2e0fef82007-06-17 19:56:36 -050012592
James Smart912e3ac2011-05-24 11:42:11 -040012593 /* Disable SR-IOV if enabled */
12594 if (phba->cfg_sriov_nr_virtfn)
12595 pci_disable_sriov(pdev);
12596
James Smart5b75da22008-12-04 22:39:35 -050012597 /* Disable interrupt */
James Smart3772a992009-05-22 14:50:54 -040012598 lpfc_sli_disable_intr(phba);
dea31012005-04-17 16:05:31 -050012599
James Smart858c9f62007-06-17 19:56:39 -050012600 scsi_host_put(shost);
James Smart2e0fef82007-06-17 19:56:36 -050012601
12602 /*
12603 * Call scsi_free before mem_free since scsi bufs are released to their
12604 * corresponding pools here.
12605 */
12606 lpfc_scsi_free(phba);
James Smart0794d602019-01-28 11:14:19 -080012607 lpfc_free_iocb_list(phba);
12608
James Smart3772a992009-05-22 14:50:54 -040012609 lpfc_mem_free_all(phba);
James Smart2e0fef82007-06-17 19:56:36 -050012610
James Smart34b02dc2008-08-24 21:49:55 -040012611 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12612 phba->hbqslimp.virt, phba->hbqslimp.phys);
James Smarted957682007-06-17 19:56:37 -050012613
James Smart2e0fef82007-06-17 19:56:36 -050012614 /* Free resources associated with SLI2 interface */
12615 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
James Smart34b02dc2008-08-24 21:49:55 -040012616 phba->slim2p.virt, phba->slim2p.phys);
James Smart2e0fef82007-06-17 19:56:36 -050012617
12618 /* unmap adapter SLIM and Control Registers */
12619 iounmap(phba->ctrl_regs_memmap_p);
12620 iounmap(phba->slim_memmap_p);
12621
James Smart3772a992009-05-22 14:50:54 -040012622 lpfc_hba_free(phba);
James Smart2e0fef82007-06-17 19:56:36 -050012623
Johannes Thumshirne0c04832016-06-07 09:44:03 +020012624 pci_release_mem_regions(pdev);
James Smart2e0fef82007-06-17 19:56:36 -050012625 pci_disable_device(pdev);
dea31012005-04-17 16:05:31 -050012626}
12627
Linas Vepstas8d63f372007-02-14 14:28:36 -060012628/**
James Smart3772a992009-05-22 14:50:54 -040012629 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053012630 * @dev_d: pointer to device
James Smart3a55b532008-12-04 22:38:54 -050012631 *
James Smart3772a992009-05-22 14:50:54 -040012632 * This routine is to be called from the kernel's PCI subsystem to support
12633 * system Power Management (PM) to device with SLI-3 interface spec. When
12634 * PM invokes this method, it quiesces the device by stopping the driver's
12635 * worker thread for the device, turning off device's interrupt and DMA,
12636 * and bring the device offline. Note that as the driver implements the
12637 * minimum PM requirements to a power-aware driver's PM support for the
12638 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12639 * to the suspend() method call will be treated as SUSPEND and the driver will
12640 * fully reinitialize its device during resume() method call, the driver will
12641 * set device to PCI_D3hot state in PCI config space instead of setting it
12642 * according to the @msg provided by the PM.
James Smart3a55b532008-12-04 22:38:54 -050012643 *
12644 * Return code
James Smart3772a992009-05-22 14:50:54 -040012645 * 0 - driver suspended the device
12646 * Error otherwise
James Smart3a55b532008-12-04 22:38:54 -050012647 **/
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053012648static int __maybe_unused
12649lpfc_pci_suspend_one_s3(struct device *dev_d)
James Smart3a55b532008-12-04 22:38:54 -050012650{
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053012651 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
James Smart3a55b532008-12-04 22:38:54 -050012652 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12653
12654 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12655 "0473 PCI device Power Management suspend.\n");
12656
12657 /* Bring down the device */
James Smart618a5232012-06-12 13:54:36 -040012658 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
James Smart3a55b532008-12-04 22:38:54 -050012659 lpfc_offline(phba);
12660 kthread_stop(phba->worker_thread);
12661
12662 /* Disable interrupt from device */
James Smart3772a992009-05-22 14:50:54 -040012663 lpfc_sli_disable_intr(phba);
James Smart3a55b532008-12-04 22:38:54 -050012664
James Smart3a55b532008-12-04 22:38:54 -050012665 return 0;
12666}
12667
12668/**
James Smart3772a992009-05-22 14:50:54 -040012669 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053012670 * @dev_d: pointer to device
James Smart3a55b532008-12-04 22:38:54 -050012671 *
James Smart3772a992009-05-22 14:50:54 -040012672 * This routine is to be called from the kernel's PCI subsystem to support
12673 * system Power Management (PM) to device with SLI-3 interface spec. When PM
12674 * invokes this method, it restores the device's PCI config space state and
12675 * fully reinitializes the device and brings it online. Note that the driver
12676 * implements only the minimum PM requirements of a power-aware driver for
12677 * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12678 * passed to the suspend() method call are treated as SUSPEND, and the driver
12679 * fully reinitializes its device during the resume() method call. The device
12680 * is therefore set to PCI_D0 directly in PCI config space before its state
12681 * is restored.
James Smart3a55b532008-12-04 22:38:54 -050012682 *
12683 * Return code
James Smart3772a992009-05-22 14:50:54 -040012684 * 0 - driver resumed the device
12685 * Error otherwise
James Smart3a55b532008-12-04 22:38:54 -050012686 **/
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053012687static int __maybe_unused
12688lpfc_pci_resume_one_s3(struct device *dev_d)
James Smart3a55b532008-12-04 22:38:54 -050012689{
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053012690 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
James Smart3a55b532008-12-04 22:38:54 -050012691 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
James Smart5b75da22008-12-04 22:39:35 -050012692 uint32_t intr_mode;
James Smart3a55b532008-12-04 22:38:54 -050012693 int error;
12694
12695 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12696 "0452 PCI device Power Management resume.\n");
12697
James Smart3a55b532008-12-04 22:38:54 -050012698 /* Startup the kernel thread for this host adapter. */
12699 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12700 "lpfc_worker_%d", phba->brd_no);
12701 if (IS_ERR(phba->worker_thread)) {
12702 error = PTR_ERR(phba->worker_thread);
12703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12704 "0434 PM resume failed to start worker "
12705 "thread: error=x%x.\n", error);
12706 return error;
12707 }
12708
James Smart5b75da22008-12-04 22:39:35 -050012709 /* Configure and enable interrupt */
James Smart3772a992009-05-22 14:50:54 -040012710 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
James Smart5b75da22008-12-04 22:39:35 -050012711 if (intr_mode == LPFC_INTR_ERROR) {
Dick Kennedy372c1872020-06-30 14:50:00 -070012712 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart5b75da22008-12-04 22:39:35 -050012713 "0430 PM resume Failed to enable interrupt\n");
12714 return -EIO;
12715 } else
12716 phba->intr_mode = intr_mode;
James Smart3a55b532008-12-04 22:38:54 -050012717
12718 /* Restart HBA and bring it online */
12719 lpfc_sli_brdrestart(phba);
12720 lpfc_online(phba);
12721
James Smart5b75da22008-12-04 22:39:35 -050012722 /* Log the current active interrupt mode */
12723 lpfc_log_intr_mode(phba, phba->intr_mode);
12724
James Smart3a55b532008-12-04 22:38:54 -050012725 return 0;
12726}
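/*
 * A minimal sketch (names assumed; the real definition appears elsewhere in
 * this file) of how the suspend/resume pair above is typically bound to the
 * device through generic dev_pm_ops, which is also why the callbacks are
 * marked __maybe_unused for !CONFIG_PM_SLEEP builds:
 *
 *	static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_sketch,
 *				 lpfc_pci_suspend_one,
 *				 lpfc_pci_resume_one);
 *
 * with .driver.pm = &lpfc_pci_pm_ops_sketch in the pci_driver structure.
 */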
12727
12728/**
James Smart891478a2009-11-18 15:40:23 -050012729 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
12730 * @phba: pointer to lpfc hba data structure.
12731 *
12732 * This routine is called to prepare the SLI3 device for PCI slot recovery. It
James Smarte2af0d22010-03-15 11:25:32 -040012733 * aborts all the outstanding SCSI I/Os to the pci device.
James Smart891478a2009-11-18 15:40:23 -050012734 **/
12735static void
12736lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12737{
Dick Kennedy372c1872020-06-30 14:50:00 -070012738 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart891478a2009-11-18 15:40:23 -050012739 "2723 PCI channel I/O abort preparing for recovery\n");
James Smarte2af0d22010-03-15 11:25:32 -040012740
12741 /*
12742 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
12743 * and let the SCSI mid-layer retry them to recover.
12744 */
James Smartdb55fba2014-04-04 13:52:02 -040012745 lpfc_sli_abort_fcp_rings(phba);
James Smart891478a2009-11-18 15:40:23 -050012746}
12747
12748/**
James Smart0d878412009-10-02 15:16:56 -040012749 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
12750 * @phba: pointer to lpfc hba data structure.
12751 *
12752 * This routine is called to prepare the SLI3 device for PCI slot reset. It
12753 * disables the device interrupt and pci device, and aborts the internal FCP
12754 * pending I/Os.
12755 **/
12756static void
12757lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12758{
Dick Kennedy372c1872020-06-30 14:50:00 -070012759 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart891478a2009-11-18 15:40:23 -050012760 "2710 PCI channel disable preparing for reset\n");
James Smarte2af0d22010-03-15 11:25:32 -040012761
James Smart75baf692010-06-08 18:31:21 -040012762 /* Block any management I/Os to the device */
James Smart618a5232012-06-12 13:54:36 -040012763 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
James Smart75baf692010-06-08 18:31:21 -040012764
James Smarte2af0d22010-03-15 11:25:32 -040012765 /* Block all SCSI devices' I/Os on the host */
12766 lpfc_scsi_dev_block(phba);
12767
James Smartea714f32013-04-17 20:18:47 -040012768 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
James Smartc00f62e2019-08-14 16:57:11 -070012769 lpfc_sli_flush_io_rings(phba);
James Smartea714f32013-04-17 20:18:47 -040012770
James Smarte2af0d22010-03-15 11:25:32 -040012771 /* stop all timers */
12772 lpfc_stop_hba_timers(phba);
12773
James Smart0d878412009-10-02 15:16:56 -040012774 /* Disable interrupt and pci device */
12775 lpfc_sli_disable_intr(phba);
12776 pci_disable_device(phba->pcidev);
James Smart0d878412009-10-02 15:16:56 -040012777}
12778
12779/**
12780 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
12781 * @phba: pointer to lpfc hba data structure.
12782 *
12783 * This routine is called to prepare the SLI3 device for the PCI slot to be
12784 * permanently disabled. It blocks the SCSI transport layer traffic and flushes the FCP
12785 * pending I/Os.
12786 **/
12787static void
James Smart75baf692010-06-08 18:31:21 -040012788lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
James Smart0d878412009-10-02 15:16:56 -040012789{
Dick Kennedy372c1872020-06-30 14:50:00 -070012790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart891478a2009-11-18 15:40:23 -050012791 "2711 PCI channel permanent disable for failure\n");
James Smarte2af0d22010-03-15 11:25:32 -040012792 /* Block all SCSI devices' I/Os on the host */
12793 lpfc_scsi_dev_block(phba);
12794
12795 /* stop all timers */
12796 lpfc_stop_hba_timers(phba);
12797
James Smart0d878412009-10-02 15:16:56 -040012798 /* Clean up all driver's outstanding SCSI I/Os */
James Smartc00f62e2019-08-14 16:57:11 -070012799 lpfc_sli_flush_io_rings(phba);
James Smart0d878412009-10-02 15:16:56 -040012800}
12801
12802/**
James Smart3772a992009-05-22 14:50:54 -040012803 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
James Smarte59058c2008-08-24 21:49:00 -040012804 * @pdev: pointer to PCI device.
12805 * @state: the current PCI connection state.
Linas Vepstas8d63f372007-02-14 14:28:36 -060012806 *
James Smart3772a992009-05-22 14:50:54 -040012807 * This routine is called from the PCI subsystem for I/O error handling to
12808 * device with SLI-3 interface spec. This function is called by the PCI
12809 * subsystem after a PCI bus error affecting this device has been detected.
12810 * When this function is invoked, it will need to stop all the I/Os and
12811 * interrupt(s) to the device. Once that is done, it will return
12812 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
12813 * as desired.
James Smarte59058c2008-08-24 21:49:00 -040012814 *
12815 * Return codes
James Smart0d878412009-10-02 15:16:56 -040012816 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
James Smart3772a992009-05-22 14:50:54 -040012817 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12818 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
James Smarte59058c2008-08-24 21:49:00 -040012819 **/
James Smart3772a992009-05-22 14:50:54 -040012820static pci_ers_result_t
12821lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
Linas Vepstas8d63f372007-02-14 14:28:36 -060012822{
James Smart51ef4c22007-08-02 11:10:31 -040012823 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12824 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
Linas Vepstas8d63f372007-02-14 14:28:36 -060012825
James Smart0d878412009-10-02 15:16:56 -040012826 switch (state) {
12827 case pci_channel_io_normal:
James Smart891478a2009-11-18 15:40:23 -050012828 /* Non-fatal error, prepare for recovery */
12829 lpfc_sli_prep_dev_for_recover(phba);
James Smart0d878412009-10-02 15:16:56 -040012830 return PCI_ERS_RESULT_CAN_RECOVER;
12831 case pci_channel_io_frozen:
12832 /* Fatal error, prepare for slot reset */
12833 lpfc_sli_prep_dev_for_reset(phba);
12834 return PCI_ERS_RESULT_NEED_RESET;
12835 case pci_channel_io_perm_failure:
12836 /* Permanent failure, prepare for device down */
James Smart75baf692010-06-08 18:31:21 -040012837 lpfc_sli_prep_dev_for_perm_failure(phba);
Linas Vepstas8d63f372007-02-14 14:28:36 -060012838 return PCI_ERS_RESULT_DISCONNECT;
James Smart0d878412009-10-02 15:16:56 -040012839 default:
12840 /* Unknown state, prepare and request slot reset */
Dick Kennedy372c1872020-06-30 14:50:00 -070012841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0d878412009-10-02 15:16:56 -040012842 "0472 Unknown PCI error state: x%x\n", state);
12843 lpfc_sli_prep_dev_for_reset(phba);
12844 return PCI_ERS_RESULT_NEED_RESET;
James Smarta8e497d2008-08-24 21:50:11 -040012845 }
Linas Vepstas8d63f372007-02-14 14:28:36 -060012846}
12847
12848/**
James Smart3772a992009-05-22 14:50:54 -040012849 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
James Smarte59058c2008-08-24 21:49:00 -040012850 * @pdev: pointer to PCI device.
Linas Vepstas8d63f372007-02-14 14:28:36 -060012851 *
James Smart3772a992009-05-22 14:50:54 -040012852 * This routine is called from the PCI subsystem for error handling to
12853 * device with SLI-3 interface spec. This is called after PCI bus has been
12854 * reset to restart the PCI card from scratch, as if from a cold-boot.
12855 * During the PCI subsystem error recovery, after driver returns
12856 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
12857 * recovery and then call this routine before calling the .resume method
12858 * to recover the device. This function will initialize the HBA device,
12859 * enable the interrupt, but it will just put the HBA into an offline state
12860 * without passing any I/O traffic.
James Smarte59058c2008-08-24 21:49:00 -040012861 *
12862 * Return codes
James Smart3772a992009-05-22 14:50:54 -040012863 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
12864 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
Linas Vepstas8d63f372007-02-14 14:28:36 -060012865 */
James Smart3772a992009-05-22 14:50:54 -040012866static pci_ers_result_t
12867lpfc_io_slot_reset_s3(struct pci_dev *pdev)
Linas Vepstas8d63f372007-02-14 14:28:36 -060012868{
James Smart51ef4c22007-08-02 11:10:31 -040012869 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12870 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
Linas Vepstas8d63f372007-02-14 14:28:36 -060012871 struct lpfc_sli *psli = &phba->sli;
James Smart5b75da22008-12-04 22:39:35 -050012872 uint32_t intr_mode;
Linas Vepstas8d63f372007-02-14 14:28:36 -060012873
12874 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
Benjamin Herrenschmidt09483912007-12-20 15:28:09 +110012875 if (pci_enable_device_mem(pdev)) {
Linas Vepstas8d63f372007-02-14 14:28:36 -060012876 printk(KERN_ERR "lpfc: Cannot re-enable "
12877 "PCI device after reset.\n");
12878 return PCI_ERS_RESULT_DISCONNECT;
12879 }
12880
James Smart97207482008-12-04 22:39:19 -050012881 pci_restore_state(pdev);
James Smart1dfb5a42010-02-12 14:40:50 -050012882
12883 /*
12884 * As the new kernel behavior of pci_restore_state() API call clears
12885 * device saved_state flag, need to save the restored state again.
12886 */
12887 pci_save_state(pdev);
12888
James Smart97207482008-12-04 22:39:19 -050012889 if (pdev->is_busmaster)
12890 pci_set_master(pdev);
Linas Vepstas8d63f372007-02-14 14:28:36 -060012891
James Smart92d7f7b2007-06-17 19:56:38 -050012892 spin_lock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -040012893 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart92d7f7b2007-06-17 19:56:38 -050012894 spin_unlock_irq(&phba->hbalock);
Linas Vepstas8d63f372007-02-14 14:28:36 -060012895
James Smart5b75da22008-12-04 22:39:35 -050012896 /* Configure and enable interrupt */
James Smart3772a992009-05-22 14:50:54 -040012897 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
James Smart5b75da22008-12-04 22:39:35 -050012898 if (intr_mode == LPFC_INTR_ERROR) {
Dick Kennedy372c1872020-06-30 14:50:00 -070012899 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart5b75da22008-12-04 22:39:35 -050012900 "0427 Cannot re-enable interrupt after "
12901 "slot reset.\n");
12902 return PCI_ERS_RESULT_DISCONNECT;
12903 } else
12904 phba->intr_mode = intr_mode;
Linas Vepstas8d63f372007-02-14 14:28:36 -060012905
James Smart75baf692010-06-08 18:31:21 -040012906 /* Take device offline, it will perform cleanup */
James Smart618a5232012-06-12 13:54:36 -040012907 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
Linas Vepstas8d63f372007-02-14 14:28:36 -060012908 lpfc_offline(phba);
12909 lpfc_sli_brdrestart(phba);
12910
James Smart5b75da22008-12-04 22:39:35 -050012911 /* Log the current active interrupt mode */
12912 lpfc_log_intr_mode(phba, phba->intr_mode);
12913
Linas Vepstas8d63f372007-02-14 14:28:36 -060012914 return PCI_ERS_RESULT_RECOVERED;
12915}
12916
12917/**
James Smart3772a992009-05-22 14:50:54 -040012918 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
James Smarte59058c2008-08-24 21:49:00 -040012919 * @pdev: pointer to PCI device
Linas Vepstas8d63f372007-02-14 14:28:36 -060012920 *
James Smart3772a992009-05-22 14:50:54 -040012921 * This routine is called from the PCI subsystem for error handling to device
12922 * with SLI-3 interface spec. It is called when kernel error recovery tells
12923 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
12924 * error recovery. After this call, traffic can start to flow from this device
12925 * again.
Linas Vepstas8d63f372007-02-14 14:28:36 -060012926 */
James Smart3772a992009-05-22 14:50:54 -040012927static void
12928lpfc_io_resume_s3(struct pci_dev *pdev)
Linas Vepstas8d63f372007-02-14 14:28:36 -060012929{
James Smart51ef4c22007-08-02 11:10:31 -040012930 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12931 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
Linas Vepstas8d63f372007-02-14 14:28:36 -060012932
James Smarte2af0d22010-03-15 11:25:32 -040012933	/* Bring device online; it will be a no-op for non-fatal error resume */
James Smart58da1ff2008-04-07 10:15:56 -040012934 lpfc_online(phba);
Linas Vepstas8d63f372007-02-14 14:28:36 -060012935}
12936
James Smart3772a992009-05-22 14:50:54 -040012937/**
James Smartda0436e2009-05-22 14:51:39 -040012938 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
12939 * @phba: pointer to lpfc hba data structure.
12940 *
12941 * returns the number of ELS/CT IOCBs to reserve
12942 **/
12943int
12944lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12945{
12946 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12947
James Smartf1126682009-06-10 17:22:44 -040012948 if (phba->sli_rev == LPFC_SLI_REV4) {
12949 if (max_xri <= 100)
James Smart6a9c52c2009-10-02 15:16:51 -040012950 return 10;
James Smartf1126682009-06-10 17:22:44 -040012951 else if (max_xri <= 256)
James Smart6a9c52c2009-10-02 15:16:51 -040012952 return 25;
James Smartf1126682009-06-10 17:22:44 -040012953 else if (max_xri <= 512)
James Smart6a9c52c2009-10-02 15:16:51 -040012954 return 50;
James Smartf1126682009-06-10 17:22:44 -040012955 else if (max_xri <= 1024)
James Smart6a9c52c2009-10-02 15:16:51 -040012956 return 100;
James Smart8a9d2e82012-05-09 21:16:12 -040012957 else if (max_xri <= 1536)
James Smart6a9c52c2009-10-02 15:16:51 -040012958 return 150;
James Smart8a9d2e82012-05-09 21:16:12 -040012959 else if (max_xri <= 2048)
12960 return 200;
12961 else
12962 return 250;
James Smartf1126682009-06-10 17:22:44 -040012963 } else
12964 return 0;
James Smartda0436e2009-05-22 14:51:39 -040012965}
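/*
 * Worked example (values follow directly from the ladder above): an SLI-4
 * port reporting max_cfg_param.max_xri = 1024 reserves 100 ELS/CT IOCBs,
 * while one reporting 2048 reserves 200; on SLI-3 this helper returns 0.
 */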
12966
12967/**
James Smart895427b2017-02-12 13:52:30 -080012968 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
12969 * @phba: pointer to lpfc hba data structure.
12970 *
James Smartf358dd02017-02-12 13:52:34 -080012971 * returns the number of ELS/CT + NVMET IOCBs to reserve
James Smart895427b2017-02-12 13:52:30 -080012972 **/
12973int
12974lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12975{
12976 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12977
James Smartf358dd02017-02-12 13:52:34 -080012978 if (phba->nvmet_support)
12979 max_xri += LPFC_NVMET_BUF_POST;
James Smart895427b2017-02-12 13:52:30 -080012980 return max_xri;
12981}
12982
12983
James Smart0a5ce732019-10-18 14:18:18 -070012984static int
James Smart1feb8202018-02-22 08:18:47 -080012985lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12986 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12987 const struct firmware *fw)
12988{
James Smart0a5ce732019-10-18 14:18:18 -070012989 int rc;
James Smart1feb8202018-02-22 08:18:47 -080012990
James Smart0a5ce732019-10-18 14:18:18 -070012991 /* Three cases: (1) FW was not supported on the detected adapter.
12992 * (2) FW update has been locked out administratively.
12993 * (3) Some other error during FW update.
12994 * In each case, an unmaskable message is written to the console
12995 * for admin diagnosis.
12996 */
12997 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
James Smart1feb8202018-02-22 08:18:47 -080012998 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
James Smart5792a0e2019-10-25 11:43:42 -070012999 magic_number != MAGIC_NUMBER_G6) ||
James Smart1feb8202018-02-22 08:18:47 -080013000 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
James Smart5792a0e2019-10-25 11:43:42 -070013001 magic_number != MAGIC_NUMBER_G7)) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013002 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0a5ce732019-10-18 14:18:18 -070013003 "3030 This firmware version is not supported on"
13004 " this HBA model. Device:%x Magic:%x Type:%x "
13005 "ID:%x Size %d %zd\n",
13006 phba->pcidev->device, magic_number, ftype, fid,
13007 fsize, fw->size);
13008 rc = -EINVAL;
13009 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0a5ce732019-10-18 14:18:18 -070013011 "3021 Firmware downloads have been prohibited "
13012 "by a system configuration setting on "
13013 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13014 "%zd\n",
13015 phba->pcidev->device, magic_number, ftype, fid,
13016 fsize, fw->size);
13017 rc = -EACCES;
13018 } else {
Dick Kennedy372c1872020-06-30 14:50:00 -070013019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0a5ce732019-10-18 14:18:18 -070013020 "3022 FW Download failed. Add Status x%x "
13021 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13022 "%zd\n",
13023 offset, phba->pcidev->device, magic_number,
13024 ftype, fid, fsize, fw->size);
13025 rc = -EIO;
13026 }
13027 return rc;
James Smart1feb8202018-02-22 08:18:47 -080013028}
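/*
 * Return-code summary for the helper above (taken from its branches):
 * -EINVAL when the image is not supported on the adapter or its magic
 * number does not match the adapter family, -EACCES when firmware download
 * has been administratively disabled, and -EIO for any other add-status
 * reported by the write-object operation.
 */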
James Smart1feb8202018-02-22 08:18:47 -080013029
James Smart895427b2017-02-12 13:52:30 -080013030/**
James Smart52d52442011-05-24 11:42:45 -040013031 * lpfc_write_firmware - attempt to write a firmware image to the port
James Smart52d52442011-05-24 11:42:45 -040013032 * @fw: pointer to firmware image returned from request_firmware.
James Smart0a5ce732019-10-18 14:18:18 -070013033 * @context: opaque context pointer; points to the lpfc hba data structure.
James Smart52d52442011-05-24 11:42:45 -040013034 *
James Smart52d52442011-05-24 11:42:45 -040013035 **/
James Smartce396282012-09-29 11:30:56 -040013036static void
13037lpfc_write_firmware(const struct firmware *fw, void *context)
James Smart52d52442011-05-24 11:42:45 -040013038{
James Smartce396282012-09-29 11:30:56 -040013039 struct lpfc_hba *phba = (struct lpfc_hba *)context;
James Smart6b5151f2012-01-18 16:24:06 -050013040 char fwrev[FW_REV_STR_SIZE];
James Smartce396282012-09-29 11:30:56 -040013041 struct lpfc_grp_hdr *image;
James Smart52d52442011-05-24 11:42:45 -040013042 struct list_head dma_buffer_list;
13043 int i, rc = 0;
13044 struct lpfc_dmabuf *dmabuf, *next;
13045 uint32_t offset = 0, temp_offset = 0;
James Smart6b6ef5d2016-10-13 15:06:17 -070013046 uint32_t magic_number, ftype, fid, fsize;
James Smart52d52442011-05-24 11:42:45 -040013047
James Smartc71ab862012-10-31 14:44:33 -040013048	/* It can be NULL in no-wait mode; sanity check */
James Smartce396282012-09-29 11:30:56 -040013049 if (!fw) {
13050 rc = -ENXIO;
13051 goto out;
13052 }
13053 image = (struct lpfc_grp_hdr *)fw->data;
13054
James Smart6b6ef5d2016-10-13 15:06:17 -070013055 magic_number = be32_to_cpu(image->magic_number);
13056 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
James Smart1feb8202018-02-22 08:18:47 -080013057 fid = bf_get_be32(lpfc_grp_hdr_id, image);
James Smart6b6ef5d2016-10-13 15:06:17 -070013058 fsize = be32_to_cpu(image->size);
13059
James Smart52d52442011-05-24 11:42:45 -040013060 INIT_LIST_HEAD(&dma_buffer_list);
James Smart52d52442011-05-24 11:42:45 -040013061 lpfc_decode_firmware_rev(phba, fwrev, 1);
James Smart88a2cfb2011-07-22 18:36:33 -040013062 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013063 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartce396282012-09-29 11:30:56 -040013064 "3023 Updating Firmware, Current Version:%s "
James Smart52d52442011-05-24 11:42:45 -040013065 "New Version:%s\n",
James Smart88a2cfb2011-07-22 18:36:33 -040013066 fwrev, image->revision);
James Smart52d52442011-05-24 11:42:45 -040013067 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
13068 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
13069 GFP_KERNEL);
13070 if (!dmabuf) {
13071 rc = -ENOMEM;
James Smartce396282012-09-29 11:30:56 -040013072 goto release_out;
James Smart52d52442011-05-24 11:42:45 -040013073 }
13074 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
13075 SLI4_PAGE_SIZE,
13076 &dmabuf->phys,
13077 GFP_KERNEL);
13078 if (!dmabuf->virt) {
13079 kfree(dmabuf);
13080 rc = -ENOMEM;
James Smartce396282012-09-29 11:30:56 -040013081 goto release_out;
James Smart52d52442011-05-24 11:42:45 -040013082 }
13083 list_add_tail(&dmabuf->list, &dma_buffer_list);
13084 }
13085 while (offset < fw->size) {
13086 temp_offset = offset;
13087 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
James Smart079b5c92011-08-21 21:48:49 -040013088 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
James Smart52d52442011-05-24 11:42:45 -040013089 memcpy(dmabuf->virt,
13090 fw->data + temp_offset,
James Smart079b5c92011-08-21 21:48:49 -040013091 fw->size - temp_offset);
13092 temp_offset = fw->size;
James Smart52d52442011-05-24 11:42:45 -040013093 break;
13094 }
James Smart52d52442011-05-24 11:42:45 -040013095 memcpy(dmabuf->virt, fw->data + temp_offset,
13096 SLI4_PAGE_SIZE);
James Smart88a2cfb2011-07-22 18:36:33 -040013097 temp_offset += SLI4_PAGE_SIZE;
James Smart52d52442011-05-24 11:42:45 -040013098 }
13099 rc = lpfc_wr_object(phba, &dma_buffer_list,
13100 (fw->size - offset), &offset);
James Smart1feb8202018-02-22 08:18:47 -080013101 if (rc) {
James Smart0a5ce732019-10-18 14:18:18 -070013102 rc = lpfc_log_write_firmware_error(phba, offset,
13103 magic_number,
13104 ftype,
13105 fid,
13106 fsize,
13107 fw);
James Smartce396282012-09-29 11:30:56 -040013108 goto release_out;
James Smart1feb8202018-02-22 08:18:47 -080013109 }
James Smart52d52442011-05-24 11:42:45 -040013110 }
13111 rc = offset;
James Smart1feb8202018-02-22 08:18:47 -080013112 } else
Dick Kennedy372c1872020-06-30 14:50:00 -070013113 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart1feb8202018-02-22 08:18:47 -080013114 "3029 Skipped Firmware update, Current "
13115 "Version:%s New Version:%s\n",
13116 fwrev, image->revision);
James Smartce396282012-09-29 11:30:56 -040013117
13118release_out:
James Smart52d52442011-05-24 11:42:45 -040013119 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
13120 list_del(&dmabuf->list);
13121 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
13122 dmabuf->virt, dmabuf->phys);
13123 kfree(dmabuf);
13124 }
James Smartce396282012-09-29 11:30:56 -040013125 release_firmware(fw);
13126out:
James Smart0a5ce732019-10-18 14:18:18 -070013127 if (rc < 0)
Dick Kennedy372c1872020-06-30 14:50:00 -070013128 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0a5ce732019-10-18 14:18:18 -070013129 "3062 Firmware update error, status %d.\n", rc);
13130 else
Dick Kennedy372c1872020-06-30 14:50:00 -070013131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0a5ce732019-10-18 14:18:18 -070013132 "3024 Firmware update success: size %d.\n", rc);
James Smart52d52442011-05-24 11:42:45 -040013133}
13134
13135/**
James Smartc71ab862012-10-31 14:44:33 -040013136 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
13137 * @phba: pointer to lpfc hba data structure.
Lee Jonesfe614ac2020-07-23 13:24:22 +010013138 * @fw_upgrade: type of firmware upgrade requested (INT_FW_UPGRADE or RUN_FW_UPGRADE).
James Smartc71ab862012-10-31 14:44:33 -040013139 *
13140 * This routine is called to perform Linux generic firmware upgrade on device
13141 * that supports such feature.
13142 **/
13143int
13144lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
13145{
13146 uint8_t file_name[ELX_MODEL_NAME_SIZE];
13147 int ret;
13148 const struct firmware *fw;
13149
13150 /* Only supported on SLI4 interface type 2 for now */
James Smart27d6ac02018-02-22 08:18:42 -080013151 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
James Smartc71ab862012-10-31 14:44:33 -040013152 LPFC_SLI_INTF_IF_TYPE_2)
13153 return -EPERM;
13154
13155 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
13156
13157 if (fw_upgrade == INT_FW_UPGRADE) {
Shawn Guo0733d832021-04-25 10:00:24 +080013158 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
James Smartc71ab862012-10-31 14:44:33 -040013159 file_name, &phba->pcidev->dev,
13160 GFP_KERNEL, (void *)phba,
13161 lpfc_write_firmware);
13162 } else if (fw_upgrade == RUN_FW_UPGRADE) {
13163 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13164 if (!ret)
13165 lpfc_write_firmware(fw, (void *)phba);
13166 } else {
13167 ret = -EINVAL;
13168 }
13169
13170 return ret;
13171}
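/*
 * Usage illustration (mirrors the probe path later in this file): an
 * asynchronous, in-kernel upgrade is requested during attach when the
 * corresponding module parameter is set,
 *
 *	if (phba->cfg_request_firmware_upgrade)
 *		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
 *
 * while RUN_FW_UPGRADE performs a synchronous request_firmware() and writes
 * the image immediately.  In both cases the image name is derived from the
 * model name as "<ModelName>.grp".
 */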
13172
13173/**
James Smartda0436e2009-05-22 14:51:39 -040013174 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
13175 * @pdev: pointer to PCI device
13176 * @pid: pointer to PCI device identifier
13177 *
13178 * This routine is called from the kernel's PCI subsystem to attach a device with
13179 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
13180 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13181 * information of the device and driver to see if the driver state that it
13182 * can support this kind of device. If the match is successful, the driver
13183 * core invokes this routine. If this routine determines it can claim the HBA,
13184 * it does all the initialization that it needs to do to handle the HBA
13185 * properly.
13186 *
13187 * Return code
13188 * 0 - driver can claim the device
13189 * negative value - driver can not claim the device
13190 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080013191static int
James Smartda0436e2009-05-22 14:51:39 -040013192lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
13193{
13194 struct lpfc_hba *phba;
13195 struct lpfc_vport *vport = NULL;
James Smart6669f9b2009-10-02 15:16:45 -040013196 struct Scsi_Host *shost = NULL;
James Smart6c621a22017-05-15 15:20:45 -070013197 int error;
James Smartda0436e2009-05-22 14:51:39 -040013198 uint32_t cfg_mode, intr_mode;
James Smartda0436e2009-05-22 14:51:39 -040013199
13200 /* Allocate memory for HBA structure */
13201 phba = lpfc_hba_alloc(pdev);
13202 if (!phba)
13203 return -ENOMEM;
13204
13205 /* Perform generic PCI device enabling operation */
13206 error = lpfc_enable_pci_dev(phba);
James Smart079b5c92011-08-21 21:48:49 -040013207 if (error)
James Smartda0436e2009-05-22 14:51:39 -040013208 goto out_free_phba;
James Smartda0436e2009-05-22 14:51:39 -040013209
13210 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
13211 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
13212 if (error)
13213 goto out_disable_pci_dev;
13214
13215 /* Set up SLI-4 specific device PCI memory space */
13216 error = lpfc_sli4_pci_mem_setup(phba);
13217 if (error) {
13218 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13219 "1410 Failed to set up pci memory space.\n");
13220 goto out_disable_pci_dev;
13221 }
13222
James Smartda0436e2009-05-22 14:51:39 -040013223 /* Set up SLI-4 Specific device driver resources */
13224 error = lpfc_sli4_driver_resource_setup(phba);
13225 if (error) {
13226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13227 "1412 Failed to set up driver resource.\n");
13228 goto out_unset_pci_mem_s4;
13229 }
13230
James Smart19ca7602010-11-20 23:11:55 -050013231 INIT_LIST_HEAD(&phba->active_rrq_list);
James Smart7d791df2011-07-22 18:37:52 -040013232 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
James Smart19ca7602010-11-20 23:11:55 -050013233
James Smartda0436e2009-05-22 14:51:39 -040013234 /* Set up common device driver resources */
13235 error = lpfc_setup_driver_resource_phase2(phba);
13236 if (error) {
13237 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13238 "1414 Failed to set up driver resource.\n");
James Smart6c621a22017-05-15 15:20:45 -070013239 goto out_unset_driver_resource_s4;
James Smartda0436e2009-05-22 14:51:39 -040013240 }
13241
James Smart079b5c92011-08-21 21:48:49 -040013242 /* Get the default values for Model Name and Description */
13243 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13244
James Smartda0436e2009-05-22 14:51:39 -040013245 /* Now, trying to enable interrupt and bring up the device */
13246 cfg_mode = phba->cfg_use_msi;
James Smartda0436e2009-05-22 14:51:39 -040013247
James Smart7b15db32013-01-03 15:43:29 -050013248 /* Put device to a known state before enabling interrupt */
James Smartcdb42be2019-01-28 11:14:21 -080013249 phba->pport = NULL;
James Smart7b15db32013-01-03 15:43:29 -050013250 lpfc_stop_port(phba);
James Smart895427b2017-02-12 13:52:30 -080013251
James Smartdcaa2132019-11-04 16:57:06 -080013252 /* Init cpu_map array */
13253 lpfc_cpu_map_array_init(phba);
13254
13255 /* Init hba_eq_hdl array */
13256 lpfc_hba_eq_hdl_array_init(phba);
13257
James Smart7b15db32013-01-03 15:43:29 -050013258 /* Configure and enable interrupt */
13259 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13260 if (intr_mode == LPFC_INTR_ERROR) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart7b15db32013-01-03 15:43:29 -050013262 "0426 Failed to enable interrupt.\n");
13263 error = -ENODEV;
James Smartcdb42be2019-01-28 11:14:21 -080013264 goto out_unset_driver_resource;
James Smartda0436e2009-05-22 14:51:39 -040013265 }
James Smart7b15db32013-01-03 15:43:29 -050013266 /* Default to single EQ for non-MSI-X */
James Smart895427b2017-02-12 13:52:30 -080013267 if (phba->intr_type != MSIX) {
James Smart6a828b02019-01-28 11:14:31 -080013268 phba->cfg_irq_chann = 1;
James Smart2d7dbc42017-02-12 13:52:35 -080013269 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
James Smart2d7dbc42017-02-12 13:52:35 -080013270 if (phba->nvmet_support)
13271 phba->cfg_nvmet_mrq = 1;
13272 }
James Smartcdb42be2019-01-28 11:14:21 -080013273 }
James Smart6a828b02019-01-28 11:14:31 -080013274 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
James Smartcdb42be2019-01-28 11:14:21 -080013275
13276 /* Create SCSI host to the physical port */
13277 error = lpfc_create_shost(phba);
13278 if (error) {
13279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13280 "1415 Failed to create scsi host.\n");
13281 goto out_disable_intr;
13282 }
13283 vport = phba->pport;
13284 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13285
13286 /* Configure sysfs attributes */
13287 error = lpfc_alloc_sysfs_attr(vport);
13288 if (error) {
13289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13290 "1416 Failed to allocate sysfs attr\n");
13291 goto out_destroy_shost;
James Smart895427b2017-02-12 13:52:30 -080013292 }
13293
James Smart7b15db32013-01-03 15:43:29 -050013294 /* Set up SLI-4 HBA */
13295 if (lpfc_sli4_hba_setup(phba)) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart7b15db32013-01-03 15:43:29 -050013297 "1421 Failed to set up hba\n");
13298 error = -ENODEV;
James Smartcdb42be2019-01-28 11:14:21 -080013299 goto out_free_sysfs_attr;
James Smart7b15db32013-01-03 15:43:29 -050013300 }
13301
13302 /* Log the current active interrupt mode */
13303 phba->intr_mode = intr_mode;
13304 lpfc_log_intr_mode(phba, intr_mode);
James Smartda0436e2009-05-22 14:51:39 -040013305
13306 /* Perform post initialization setup */
13307 lpfc_post_init_setup(phba);
13308
James Smart01649562017-02-12 13:52:32 -080013309	/* NVME support detected in the firmware earlier in the driver load has
13310	 * already corrected the FC4 type, making a check for nvme_support unnecessary.
13311 */
James Smart0794d602019-01-28 11:14:19 -080013312 if (phba->nvmet_support == 0) {
13313 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13314 /* Create NVME binding with nvme_fc_transport. This
13315 * ensures the vport is initialized. If the localport
13316 * create fails, it should not unload the driver to
13317 * support field issues.
13318 */
13319 error = lpfc_nvme_create_localport(vport);
13320 if (error) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013321 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart0794d602019-01-28 11:14:19 -080013322 "6004 NVME registration "
13323 "failed, error x%x\n",
13324 error);
13325 }
James Smart01649562017-02-12 13:52:32 -080013326 }
13327 }
James Smart895427b2017-02-12 13:52:30 -080013328
James Smartc71ab862012-10-31 14:44:33 -040013329 /* check for firmware upgrade or downgrade */
13330 if (phba->cfg_request_firmware_upgrade)
Sebastian Herbsztdb6f1c22015-08-31 16:48:14 -040013331 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
James Smart52d52442011-05-24 11:42:45 -040013332
James Smart1c6834a2009-07-19 10:01:26 -040013333 /* Check if there are static vports to be created. */
13334 lpfc_create_static_vport(phba);
James Smartd2cc9bc2018-09-10 10:30:50 -070013335
13336 /* Enable RAS FW log support */
13337 lpfc_sli4_ras_setup(phba);
13338
James Smart93a4d6f2019-11-04 16:57:05 -080013339 INIT_LIST_HEAD(&phba->poll_list);
James Smartf861f592020-03-22 11:12:54 -070013340 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
James Smart93a4d6f2019-11-04 16:57:05 -080013341 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13342
James Smartda0436e2009-05-22 14:51:39 -040013343 return 0;
13344
James Smartda0436e2009-05-22 14:51:39 -040013345out_free_sysfs_attr:
13346 lpfc_free_sysfs_attr(vport);
13347out_destroy_shost:
13348 lpfc_destroy_shost(phba);
James Smartcdb42be2019-01-28 11:14:21 -080013349out_disable_intr:
13350 lpfc_sli4_disable_intr(phba);
James Smartda0436e2009-05-22 14:51:39 -040013351out_unset_driver_resource:
13352 lpfc_unset_driver_resource_phase2(phba);
James Smartda0436e2009-05-22 14:51:39 -040013353out_unset_driver_resource_s4:
13354 lpfc_sli4_driver_resource_unset(phba);
13355out_unset_pci_mem_s4:
13356 lpfc_sli4_pci_mem_unset(phba);
13357out_disable_pci_dev:
13358 lpfc_disable_pci_dev(phba);
James Smart6669f9b2009-10-02 15:16:45 -040013359 if (shost)
13360 scsi_host_put(shost);
James Smartda0436e2009-05-22 14:51:39 -040013361out_free_phba:
13362 lpfc_hba_free(phba);
13363 return error;
13364}
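/*
 * Compared with the SLI-3 probe above, this SLI-4 path configures the
 * interrupt mode once (no downgrade/retry loop), falls back to a single
 * EQ/IRQ channel when MSI-X is unavailable, and additionally creates the
 * NVME localport, enables optional RAS firmware logging, and registers the
 * CPU-hotplug polling instance before returning.
 */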
13365
13366/**
13367 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
13368 * @pdev: pointer to PCI device
13369 *
13370 * This routine is called from the kernel's PCI subsystem to detach a device with
13371 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
13372 * removed from PCI bus, it performs all the necessary cleanup for the HBA
13373 * device to be removed from the PCI subsystem properly.
13374 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080013375static void
James Smartda0436e2009-05-22 14:51:39 -040013376lpfc_pci_remove_one_s4(struct pci_dev *pdev)
13377{
13378 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13379 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13380 struct lpfc_vport **vports;
13381 struct lpfc_hba *phba = vport->phba;
13382 int i;
13383
13384 /* Mark the device unloading flag */
13385 spin_lock_irq(&phba->hbalock);
13386 vport->load_flag |= FC_UNLOADING;
13387 spin_unlock_irq(&phba->hbalock);
13388
James Smartda0436e2009-05-22 14:51:39 -040013389 lpfc_free_sysfs_attr(vport);
13390
13391 /* Release all the vports against this physical port */
13392 vports = lpfc_create_vport_work_array(phba);
13393 if (vports != NULL)
James Smart587a37f2012-05-09 21:16:03 -040013394 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13395 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13396 continue;
James Smartda0436e2009-05-22 14:51:39 -040013397 fc_vport_terminate(vports[i]->fc_vport);
James Smart587a37f2012-05-09 21:16:03 -040013398 }
James Smartda0436e2009-05-22 14:51:39 -040013399 lpfc_destroy_vport_work_array(phba, vports);
13400
James Smart95f0ef82020-11-15 11:26:32 -080013401 /* Remove FC host with the physical port */
James Smartda0436e2009-05-22 14:51:39 -040013402 fc_remove_host(shost);
James Smarte9b11082020-11-15 11:26:33 -080013403 scsi_remove_host(shost);
James Smartda0436e2009-05-22 14:51:39 -040013404
James Smartda0436e2009-05-22 14:51:39 -040013405 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
13406 * localports are destroyed afterwards to clean up all transport memory.
13407 */
13408 lpfc_cleanup(vport);
13409 lpfc_nvmet_destroy_targetport(phba);
13410 lpfc_nvme_destroy_localport(vport);
13411
James Smartc4908502019-01-28 11:14:28 -080013412 /* De-allocate multi-XRI pools */
13413 if (phba->cfg_xri_rebalancing)
13414 lpfc_destroy_multixri_pools(phba);
13415
James Smart281d6192018-01-30 15:58:47 -080013416 /*
13417 * Bring down the SLI Layer. This step disables all interrupts,
13418 * clears the rings, discards all mailbox commands, and resets
13419 * the HBA FCoE function.
13420 */
13421 lpfc_debugfs_terminate(vport);
James Smartda0436e2009-05-22 14:51:39 -040013422
Dick Kennedy19017622017-09-29 17:34:27 -070013423 lpfc_stop_hba_timers(phba);
James Smart523128e2018-09-10 10:30:46 -070013424 spin_lock_irq(&phba->port_list_lock);
James Smartda0436e2009-05-22 14:51:39 -040013425 list_del_init(&vport->listentry);
James Smart523128e2018-09-10 10:30:46 -070013426 spin_unlock_irq(&phba->port_list_lock);
James Smartda0436e2009-05-22 14:51:39 -040013427
James Smart3677a3a2010-09-29 11:19:14 -040013428 /* Perform scsi free before driver resource_unset since scsi
James Smartda0436e2009-05-22 14:51:39 -040013429 * buffers are released to their corresponding pools here.
13430 */
James Smart5e5b5112019-01-28 11:14:22 -080013431 lpfc_io_free(phba);
James Smart01649562017-02-12 13:52:32 -080013432 lpfc_free_iocb_list(phba);
James Smart5e5b5112019-01-28 11:14:22 -080013433 lpfc_sli4_hba_unset(phba);
James Smart67d12732012-08-03 12:36:13 -040013434
James Smart0cdb84e2018-04-09 14:24:26 -070013435 lpfc_unset_driver_resource_phase2(phba);
James Smartda0436e2009-05-22 14:51:39 -040013436 lpfc_sli4_driver_resource_unset(phba);
13437
13438 /* Unmap adapter Control and Doorbell registers */
13439 lpfc_sli4_pci_mem_unset(phba);
13440
13441 /* Release PCI resources and disable device's PCI function */
13442 scsi_host_put(shost);
13443 lpfc_disable_pci_dev(phba);
13444
13445 /* Finally, free the driver's device data structure */
13446 lpfc_hba_free(phba);
13447
13448 return;
13449}
13450
13451/**
13452 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013453 * @dev_d: pointer to device
James Smartda0436e2009-05-22 14:51:39 -040013454 *
13455 * This routine is called from the kernel's PCI subsystem to support system
13456 * Power Management (PM) for a device with SLI-4 interface spec. When PM
13457 * invokes this method, it quiesces the device by stopping the driver's
13458 * worker thread for the device, turning off the device's interrupt and DMA,
13459 * and bringing the device offline. Note that the driver implements only the
13460 * minimum PM requirements of a power-aware driver for suspend/resume: all
13461 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered to the
13462 * suspend() method are treated as SUSPEND, the driver fully reinitializes
13463 * its device during the resume() method call, and the driver sets the
13464 * device to the PCI_D3hot state in PCI config space instead of a state
13465 * derived from the PM message.
13466 *
13467 * Return code
13468 * 0 - driver suspended the device
13469 * Error otherwise
13470 **/
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013471static int __maybe_unused
13472lpfc_pci_suspend_one_s4(struct device *dev_d)
James Smartda0436e2009-05-22 14:51:39 -040013473{
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013474 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
James Smartda0436e2009-05-22 14:51:39 -040013475 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13476
13477 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart75baf692010-06-08 18:31:21 -040013478 "2843 PCI device Power Management suspend.\n");
James Smartda0436e2009-05-22 14:51:39 -040013479
13480 /* Bring down the device */
James Smart618a5232012-06-12 13:54:36 -040013481 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
James Smartda0436e2009-05-22 14:51:39 -040013482 lpfc_offline(phba);
13483 kthread_stop(phba->worker_thread);
13484
13485 /* Disable interrupt from device */
13486 lpfc_sli4_disable_intr(phba);
James Smart5350d872011-10-10 21:33:49 -040013487 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -040013488
James Smartda0436e2009-05-22 14:51:39 -040013489 return 0;
13490}
13491
13492/**
13493 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013494 * @dev_d: pointer to device
James Smartda0436e2009-05-22 14:51:39 -040013495 *
13496 * This routine is called from the kernel's PCI subsystem to support system
13497 * Power Management (PM) for a device with SLI-4 interface spec. When PM
13498 * invokes this method, it restores the device's PCI config space state and
13499 * fully reinitializes the device and brings it online. Note that the driver
13500 * implements only the minimum PM requirements of a power-aware driver for
13501 * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
13502 * delivered to the suspend() method are treated as SUSPEND, the driver
13503 * fully reinitializes its device during the resume() method call, and the
13504 * device is set to PCI_D0 directly in PCI config space before its state is
13505 * restored.
13506 *
13507 * Return code
13508 * 0 - driver resumed the device
13509 * Error otherwise
13510 **/
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013511static int __maybe_unused
13512lpfc_pci_resume_one_s4(struct device *dev_d)
James Smartda0436e2009-05-22 14:51:39 -040013513{
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013514 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
James Smartda0436e2009-05-22 14:51:39 -040013515 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13516 uint32_t intr_mode;
13517 int error;
13518
13519 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13520 "0292 PCI device Power Management resume.\n");
13521
James Smartda0436e2009-05-22 14:51:39 -040013522 /* Startup the kernel thread for this host adapter. */
13523 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13524 "lpfc_worker_%d", phba->brd_no);
13525 if (IS_ERR(phba->worker_thread)) {
13526 error = PTR_ERR(phba->worker_thread);
13527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13528 "0293 PM resume failed to start worker "
13529 "thread: error=x%x.\n", error);
13530 return error;
13531 }
13532
13533 /* Configure and enable interrupt */
13534 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13535 if (intr_mode == LPFC_INTR_ERROR) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smartda0436e2009-05-22 14:51:39 -040013537 "0294 PM resume Failed to enable interrupt\n");
13538 return -EIO;
13539 } else
13540 phba->intr_mode = intr_mode;
13541
13542 /* Restart HBA and bring it online */
13543 lpfc_sli_brdrestart(phba);
13544 lpfc_online(phba);
13545
13546 /* Log the current active interrupt mode */
13547 lpfc_log_intr_mode(phba, phba->intr_mode);
13548
13549 return 0;
13550}
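
/*
 * Note: the SLI-4 suspend/resume handlers above are not registered with the
 * PM core directly; they are reached through lpfc_pci_suspend_one() and
 * lpfc_pci_resume_one(), which SIMPLE_DEV_PM_OPS() binds into
 * lpfc_driver.driver.pm further below in this file.
 */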
13551
13552/**
James Smart75baf692010-06-08 18:31:21 -040013553 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
13554 * @phba: pointer to lpfc hba data structure.
13555 *
13556 * This routine is called to prepare the SLI4 device for PCI slot recovery. It
13557 * aborts all the outstanding SCSI I/Os to the pci device.
13558 **/
13559static void
13560lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13561{
Dick Kennedy372c1872020-06-30 14:50:00 -070013562 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart75baf692010-06-08 18:31:21 -040013563 "2828 PCI channel I/O abort preparing for recovery\n");
13564 /*
13565 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
13566 * and let the SCSI mid-layer retry them to recover.
13567 */
James Smartdb55fba2014-04-04 13:52:02 -040013568 lpfc_sli_abort_fcp_rings(phba);
James Smart75baf692010-06-08 18:31:21 -040013569}
13570
13571/**
13572 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
13573 * @phba: pointer to lpfc hba data structure.
13574 *
13575 * This routine is called to prepare the SLI4 device for PCI slot reset. It
13576 * disables the device interrupt and pci device, and aborts the internal FCP
13577 * pending I/Os.
13578 **/
13579static void
13580lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13581{
Dick Kennedy372c1872020-06-30 14:50:00 -070013582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart75baf692010-06-08 18:31:21 -040013583 "2826 PCI channel disable preparing for reset\n");
13584
13585 /* Block any management I/Os to the device */
James Smart618a5232012-06-12 13:54:36 -040013586 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
James Smart75baf692010-06-08 18:31:21 -040013587
13588 /* Block all SCSI devices' I/Os on the host */
13589 lpfc_scsi_dev_block(phba);
13590
James Smartc00f62e2019-08-14 16:57:11 -070013591 /* Flush all driver's outstanding I/Os as we are to reset */
13592 lpfc_sli_flush_io_rings(phba);
James Smartc3725bd2017-11-20 16:00:42 -080013593
James Smart75baf692010-06-08 18:31:21 -040013594 /* stop all timers */
13595 lpfc_stop_hba_timers(phba);
13596
13597 /* Disable interrupt and pci device */
13598 lpfc_sli4_disable_intr(phba);
James Smart5350d872011-10-10 21:33:49 -040013599 lpfc_sli4_queue_destroy(phba);
James Smart75baf692010-06-08 18:31:21 -040013600 pci_disable_device(phba->pcidev);
James Smart75baf692010-06-08 18:31:21 -040013601}
13602
13603/**
13604 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
13605 * @phba: pointer to lpfc hba data structure.
13606 *
13607 * This routine is called to prepare the SLI4 device for the PCI slot being
13608 * permanently disabled. It blocks the SCSI transport layer traffic and
13609 * flushes the pending FCP I/Os.
13610 **/
13611static void
13612lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13613{
Dick Kennedy372c1872020-06-30 14:50:00 -070013614 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart75baf692010-06-08 18:31:21 -040013615 "2827 PCI channel permanent disable for failure\n");
13616
13617 /* Block all SCSI devices' I/Os on the host */
13618 lpfc_scsi_dev_block(phba);
13619
13620 /* stop all timers */
13621 lpfc_stop_hba_timers(phba);
13622
James Smartc00f62e2019-08-14 16:57:11 -070013623 /* Clean up all driver's outstanding I/Os */
13624 lpfc_sli_flush_io_rings(phba);
James Smart75baf692010-06-08 18:31:21 -040013625}
13626
13627/**
James Smartda0436e2009-05-22 14:51:39 -040013628 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
13629 * @pdev: pointer to PCI device.
13630 * @state: the current PCI connection state.
13631 *
13632 * This routine is called from the PCI subsystem for error handling of a device
13633 * with SLI-4 interface spec. This function is called by the PCI subsystem
13634 * after a PCI bus error affecting this device has been detected. When this
13635 * function is invoked, it will need to stop all the I/Os and interrupt(s)
13636 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
13637 * for the PCI subsystem to perform proper recovery as desired.
13638 *
13639 * Return codes
13640 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13641 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13642 **/
13643static pci_ers_result_t
13644lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13645{
James Smart75baf692010-06-08 18:31:21 -040013646 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13647 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13648
13649 switch (state) {
13650 case pci_channel_io_normal:
13651 /* Non-fatal error, prepare for recovery */
13652 lpfc_sli4_prep_dev_for_recover(phba);
13653 return PCI_ERS_RESULT_CAN_RECOVER;
13654 case pci_channel_io_frozen:
13655 /* Fatal error, prepare for slot reset */
13656 lpfc_sli4_prep_dev_for_reset(phba);
13657 return PCI_ERS_RESULT_NEED_RESET;
13658 case pci_channel_io_perm_failure:
13659 /* Permanent failure, prepare for device down */
13660 lpfc_sli4_prep_dev_for_perm_failure(phba);
13661 return PCI_ERS_RESULT_DISCONNECT;
13662 default:
13663 /* Unknown state, prepare and request slot reset */
Dick Kennedy372c1872020-06-30 14:50:00 -070013664 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart75baf692010-06-08 18:31:21 -040013665 "2825 Unknown PCI error state: x%x\n", state);
13666 lpfc_sli4_prep_dev_for_reset(phba);
13667 return PCI_ERS_RESULT_NEED_RESET;
13668 }
James Smartda0436e2009-05-22 14:51:39 -040013669}
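
/*
 * Summary of the mapping implemented above (informational only):
 *
 *	pci_channel_io_normal       -> lpfc_sli4_prep_dev_for_recover,      CAN_RECOVER
 *	pci_channel_io_frozen       -> lpfc_sli4_prep_dev_for_reset,        NEED_RESET
 *	pci_channel_io_perm_failure -> lpfc_sli4_prep_dev_for_perm_failure, DISCONNECT
 *	unknown state               -> lpfc_sli4_prep_dev_for_reset,        NEED_RESET
 */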
13670
13671/**
13672 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
13673 * @pdev: pointer to PCI device.
13674 *
13675 * This routine is called from the PCI subsystem for error handling of a device
13676 * with SLI-4 interface spec. It is called after the PCI bus has been reset to
13677 * restart the PCI card from scratch, as if from a cold-boot. During the
13678 * PCI subsystem error recovery, after the driver returns
13679 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
13680 * recovery and then call this routine before calling the .resume method to
13681 * recover the device. This function will initialize the HBA device and
13682 * enable its interrupt, but it leaves the HBA in the offline state without
13683 * passing any I/O traffic.
13684 *
13685 * Return codes
13686 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
13687 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13688 */
13689static pci_ers_result_t
13690lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13691{
James Smart75baf692010-06-08 18:31:21 -040013692 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13693 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13694 struct lpfc_sli *psli = &phba->sli;
13695 uint32_t intr_mode;
13696
13697 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13698 if (pci_enable_device_mem(pdev)) {
13699 printk(KERN_ERR "lpfc: Cannot re-enable "
13700 "PCI device after reset.\n");
13701 return PCI_ERS_RESULT_DISCONNECT;
13702 }
13703
13704 pci_restore_state(pdev);
James Smart0a96e972011-07-22 18:37:28 -040013705
13706 /*
13707 * As the new kernel behavior of the pci_restore_state() API call clears
13708 * the device's saved_state flag, the restored state needs to be saved again.
13709 */
13710 pci_save_state(pdev);
13711
James Smart75baf692010-06-08 18:31:21 -040013712 if (pdev->is_busmaster)
13713 pci_set_master(pdev);
13714
13715 spin_lock_irq(&phba->hbalock);
13716 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13717 spin_unlock_irq(&phba->hbalock);
13718
13719 /* Configure and enable interrupt */
13720 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13721 if (intr_mode == LPFC_INTR_ERROR) {
Dick Kennedy372c1872020-06-30 14:50:00 -070013722 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart75baf692010-06-08 18:31:21 -040013723 "2824 Cannot re-enable interrupt after "
13724 "slot reset.\n");
13725 return PCI_ERS_RESULT_DISCONNECT;
13726 } else
13727 phba->intr_mode = intr_mode;
13728
13729 /* Log the current active interrupt mode */
13730 lpfc_log_intr_mode(phba, phba->intr_mode);
13731
James Smartda0436e2009-05-22 14:51:39 -040013732 return PCI_ERS_RESULT_RECOVERED;
13733}
13734
13735/**
13736 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
13737 * @pdev: pointer to PCI device
13738 *
13739 * This routine is called from the PCI subsystem for error handling of a device
13740 * with SLI-4 interface spec. It is called when kernel error recovery tells
13741 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
13742 * error recovery. After this call, traffic can start to flow from this device
13743 * again.
13744 **/
13745static void
13746lpfc_io_resume_s4(struct pci_dev *pdev)
13747{
James Smart75baf692010-06-08 18:31:21 -040013748 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13749 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13750
13751 /*
13752 * In case of slot reset, the function reset is performed through a
13753 * mailbox command, which needs DMA to be enabled, so this operation
13754 * has to be done in the io resume phase. Taking the device offline
13755 * will perform the necessary cleanup.
13756 */
13757 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13758 /* Perform device reset */
James Smart618a5232012-06-12 13:54:36 -040013759 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
James Smart75baf692010-06-08 18:31:21 -040013760 lpfc_offline(phba);
13761 lpfc_sli_brdrestart(phba);
13762 /* Bring the device back online */
13763 lpfc_online(phba);
13764 }
James Smartda0436e2009-05-22 14:51:39 -040013765}
13766
13767/**
James Smart3772a992009-05-22 14:50:54 -040013768 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
13769 * @pdev: pointer to PCI device
13770 * @pid: pointer to PCI device identifier
13771 *
13772 * This routine is to be registered to the kernel's PCI subsystem. When an
13773 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
13774 * at PCI device-specific information of the device and driver to see if the
13775 * driver states that it can support this kind of device. If the match is
13776 * successful, the driver core invokes this routine. This routine dispatches
13777 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
13778 * do all the initialization that it needs to do to handle the HBA device
13779 * properly.
13780 *
13781 * Return code
13782 * 0 - driver can claim the device
13783 * negative value - driver can not claim the device
13784 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080013785static int
James Smart3772a992009-05-22 14:50:54 -040013786lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13787{
13788 int rc;
James Smart8fa38512009-07-19 10:01:03 -040013789 struct lpfc_sli_intf intf;
James Smart3772a992009-05-22 14:50:54 -040013790
James Smart28baac72010-02-12 14:42:03 -050013791 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
James Smart3772a992009-05-22 14:50:54 -040013792 return -ENODEV;
13793
James Smart8fa38512009-07-19 10:01:03 -040013794 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
James Smart28baac72010-02-12 14:42:03 -050013795 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
James Smartda0436e2009-05-22 14:51:39 -040013796 rc = lpfc_pci_probe_one_s4(pdev, pid);
James Smart8fa38512009-07-19 10:01:03 -040013797 else
James Smart3772a992009-05-22 14:50:54 -040013798 rc = lpfc_pci_probe_one_s3(pdev, pid);
James Smart8fa38512009-07-19 10:01:03 -040013799
James Smart3772a992009-05-22 14:50:54 -040013800 return rc;
13801}
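
/*
 * Summary of the dispatch above (informational only): the SLI_INTF register
 * is read once from PCI config space; when its valid and slirev fields
 * report a valid SLI-4 interface, the SLI-4 probe path is taken, otherwise
 * the SLI-3 path is used.
 */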
13802
13803/**
13804 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
13805 * @pdev: pointer to PCI device
13806 *
13807 * This routine is to be registered to the kernel's PCI subsystem. When an
13808 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
13809 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
13810 * remove routine, which will perform all the necessary cleanup for the
13811 * device to be removed from the PCI subsystem properly.
13812 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080013813static void
James Smart3772a992009-05-22 14:50:54 -040013814lpfc_pci_remove_one(struct pci_dev *pdev)
13815{
13816 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13817 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13818
13819 switch (phba->pci_dev_grp) {
13820 case LPFC_PCI_DEV_LP:
13821 lpfc_pci_remove_one_s3(pdev);
13822 break;
James Smartda0436e2009-05-22 14:51:39 -040013823 case LPFC_PCI_DEV_OC:
13824 lpfc_pci_remove_one_s4(pdev);
13825 break;
James Smart3772a992009-05-22 14:50:54 -040013826 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070013827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040013828 "1424 Invalid PCI device group: 0x%x\n",
13829 phba->pci_dev_grp);
13830 break;
13831 }
13832 return;
13833}
13834
13835/**
13836 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013837 * @dev: pointer to device
James Smart3772a992009-05-22 14:50:54 -040013838 *
13839 * This routine is to be registered to the kernel's PCI subsystem to support
13840 * system Power Management (PM). When PM invokes this method, it dispatches
13841 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
13842 * suspend the device.
13843 *
13844 * Return code
13845 * 0 - driver suspended the device
13846 * Error otherwise
13847 **/
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013848static int __maybe_unused
13849lpfc_pci_suspend_one(struct device *dev)
James Smart3772a992009-05-22 14:50:54 -040013850{
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013851 struct Scsi_Host *shost = dev_get_drvdata(dev);
James Smart3772a992009-05-22 14:50:54 -040013852 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13853 int rc = -ENODEV;
13854
13855 switch (phba->pci_dev_grp) {
13856 case LPFC_PCI_DEV_LP:
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013857 rc = lpfc_pci_suspend_one_s3(dev);
James Smart3772a992009-05-22 14:50:54 -040013858 break;
James Smartda0436e2009-05-22 14:51:39 -040013859 case LPFC_PCI_DEV_OC:
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013860 rc = lpfc_pci_suspend_one_s4(dev);
James Smartda0436e2009-05-22 14:51:39 -040013861 break;
James Smart3772a992009-05-22 14:50:54 -040013862 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070013863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040013864 "1425 Invalid PCI device group: 0x%x\n",
13865 phba->pci_dev_grp);
13866 break;
13867 }
13868 return rc;
13869}
13870
13871/**
13872 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013873 * @dev: pointer to device
James Smart3772a992009-05-22 14:50:54 -040013874 *
13875 * This routine is to be registered to the kernel's PCI subsystem to support
13876 * system Power Management (PM). When PM invokes this method, it dispatches
13877 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
13878 * resume the device.
13879 *
13880 * Return code
13881 * 0 - driver resumed the device
13882 * Error otherwise
13883 **/
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013884static int __maybe_unused
13885lpfc_pci_resume_one(struct device *dev)
James Smart3772a992009-05-22 14:50:54 -040013886{
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013887 struct Scsi_Host *shost = dev_get_drvdata(dev);
James Smart3772a992009-05-22 14:50:54 -040013888 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13889 int rc = -ENODEV;
13890
13891 switch (phba->pci_dev_grp) {
13892 case LPFC_PCI_DEV_LP:
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013893 rc = lpfc_pci_resume_one_s3(dev);
James Smart3772a992009-05-22 14:50:54 -040013894 break;
James Smartda0436e2009-05-22 14:51:39 -040013895 case LPFC_PCI_DEV_OC:
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053013896 rc = lpfc_pci_resume_one_s4(dev);
James Smartda0436e2009-05-22 14:51:39 -040013897 break;
James Smart3772a992009-05-22 14:50:54 -040013898 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070013899 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040013900 "1426 Invalid PCI device group: 0x%x\n",
13901 phba->pci_dev_grp);
13902 break;
13903 }
13904 return rc;
13905}
13906
13907/**
13908 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13909 * @pdev: pointer to PCI device.
13910 * @state: the current PCI connection state.
13911 *
13912 * This routine is registered to the PCI subsystem for error handling. This
13913 * function is called by the PCI subsystem after a PCI bus error affecting
13914 * this device has been detected. When this routine is invoked, it dispatches
13915 * the action to the proper SLI-3 or SLI-4 device error detected handling
13916 * routine, which will perform the proper error detected operation.
13917 *
13918 * Return codes
13919 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13920 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13921 **/
13922static pci_ers_result_t
13923lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13924{
13925 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13926 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13927 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13928
13929 switch (phba->pci_dev_grp) {
13930 case LPFC_PCI_DEV_LP:
13931 rc = lpfc_io_error_detected_s3(pdev, state);
13932 break;
James Smartda0436e2009-05-22 14:51:39 -040013933 case LPFC_PCI_DEV_OC:
13934 rc = lpfc_io_error_detected_s4(pdev, state);
13935 break;
James Smart3772a992009-05-22 14:50:54 -040013936 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070013937 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040013938 "1427 Invalid PCI device group: 0x%x\n",
13939 phba->pci_dev_grp);
13940 break;
13941 }
13942 return rc;
13943}
13944
13945/**
13946 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
13947 * @pdev: pointer to PCI device.
13948 *
13949 * This routine is registered to the PCI subsystem for error handling. This
13950 * function is called after PCI bus has been reset to restart the PCI card
13951 * from scratch, as if from a cold-boot. When this routine is invoked, it
13952 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13953 * routine, which will perform the proper device reset.
13954 *
13955 * Return codes
13956 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
13957 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13958 **/
13959static pci_ers_result_t
13960lpfc_io_slot_reset(struct pci_dev *pdev)
13961{
13962 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13963 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13964 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13965
13966 switch (phba->pci_dev_grp) {
13967 case LPFC_PCI_DEV_LP:
13968 rc = lpfc_io_slot_reset_s3(pdev);
13969 break;
James Smartda0436e2009-05-22 14:51:39 -040013970 case LPFC_PCI_DEV_OC:
13971 rc = lpfc_io_slot_reset_s4(pdev);
13972 break;
James Smart3772a992009-05-22 14:50:54 -040013973 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070013974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040013975 "1428 Invalid PCI device group: 0x%x\n",
13976 phba->pci_dev_grp);
13977 break;
13978 }
13979 return rc;
13980}
13981
13982/**
13983 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
13984 * @pdev: pointer to PCI device
13985 *
13986 * This routine is registered to the PCI subsystem for error handling. It
13987 * is called when kernel error recovery tells the lpfc driver that it is
13988 * OK to resume normal PCI operation after PCI bus error recovery. When
13989 * this routine is invoked, it dispatches the action to the proper SLI-3
13990 * or SLI-4 device io_resume routine, which will resume the device operation.
13991 **/
13992static void
13993lpfc_io_resume(struct pci_dev *pdev)
13994{
13995 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13996 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13997
13998 switch (phba->pci_dev_grp) {
13999 case LPFC_PCI_DEV_LP:
14000 lpfc_io_resume_s3(pdev);
14001 break;
James Smartda0436e2009-05-22 14:51:39 -040014002 case LPFC_PCI_DEV_OC:
14003 lpfc_io_resume_s4(pdev);
14004 break;
James Smart3772a992009-05-22 14:50:54 -040014005 default:
Dick Kennedy372c1872020-06-30 14:50:00 -070014006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
James Smart3772a992009-05-22 14:50:54 -040014007 "1429 Invalid PCI device group: 0x%x\n",
14008 phba->pci_dev_grp);
14009 break;
14010 }
14011 return;
14012}
14013
James Smart1ba981f2014-02-20 09:56:45 -050014014/**
14015 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
14016 * @phba: pointer to lpfc hba data structure.
14017 *
14018 * This routine checks to see if OAS is supported for this adapter. If
14019 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
14020 * the enable oas flag is cleared and the pool created for OAS device data
14021 * is destroyed.
14022 *
14023 **/
YueHaibingc7092972019-04-17 23:05:54 +080014024static void
James Smart1ba981f2014-02-20 09:56:45 -050014025lpfc_sli4_oas_verify(struct lpfc_hba *phba)
14026{
14027
14028 if (!phba->cfg_EnableXLane)
14029 return;
14030
14031 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
14032 phba->cfg_fof = 1;
14033 } else {
James Smartf38fa0b2014-04-04 13:52:21 -040014034 phba->cfg_fof = 0;
Saurav Girepunjec3e5aac2019-10-27 01:17:17 +053014035 mempool_destroy(phba->device_data_mem_pool);
James Smart1ba981f2014-02-20 09:56:45 -050014036 phba->device_data_mem_pool = NULL;
14037 }
14038
14039 return;
14040}
14041
14042/**
James Smartd2cc9bc2018-09-10 10:30:50 -070014043 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
14044 * @phba: pointer to lpfc hba data structure.
14045 *
14046 * This routine checks to see if RAS is supported by the adapter and, if so,
14047 * whether RAS FW logging is to be enabled through this PCI function.
14048 **/
14049void
14050lpfc_sli4_ras_init(struct lpfc_hba *phba)
14051{
14052 switch (phba->pcidev->device) {
14053 case PCI_DEVICE_ID_LANCER_G6_FC:
14054 case PCI_DEVICE_ID_LANCER_G7_FC:
14055 phba->ras_fwlog.ras_hwsupport = true;
James Smartcb349902018-11-29 16:09:27 -080014056 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
14057 phba->cfg_ras_fwlog_buffsize)
James Smartd2cc9bc2018-09-10 10:30:50 -070014058 phba->ras_fwlog.ras_enabled = true;
14059 else
14060 phba->ras_fwlog.ras_enabled = false;
14061 break;
14062 default:
14063 phba->ras_fwlog.ras_hwsupport = false;
14064 }
14065}
14066
James Smart1ba981f2014-02-20 09:56:45 -050014067
dea31012005-04-17 16:05:31 -050014068MODULE_DEVICE_TABLE(pci, lpfc_id_table);
14069
Stephen Hemmingera55b2d22012-09-07 09:33:16 -070014070static const struct pci_error_handlers lpfc_err_handler = {
Linas Vepstas8d63f372007-02-14 14:28:36 -060014071 .error_detected = lpfc_io_error_detected,
14072 .slot_reset = lpfc_io_slot_reset,
14073 .resume = lpfc_io_resume,
14074};
14075
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053014076static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
14077 lpfc_pci_suspend_one,
14078 lpfc_pci_resume_one);
14079
dea31012005-04-17 16:05:31 -050014080static struct pci_driver lpfc_driver = {
14081 .name = LPFC_DRIVER_NAME,
14082 .id_table = lpfc_id_table,
14083 .probe = lpfc_pci_probe_one,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080014084 .remove = lpfc_pci_remove_one,
Anton Blanchard85e8a232017-02-13 08:49:20 +110014085 .shutdown = lpfc_pci_remove_one,
Vaibhav Guptaef6fa162020-11-02 22:17:18 +053014086 .driver.pm = &lpfc_pci_pm_ops_one,
James Smart2e0fef82007-06-17 19:56:36 -050014087 .err_handler = &lpfc_err_handler,
dea31012005-04-17 16:05:31 -050014088};
14089
James Smart3ef6d242012-01-18 16:23:48 -050014090static const struct file_operations lpfc_mgmt_fop = {
Al Viro858feac2013-04-14 22:39:37 -040014091 .owner = THIS_MODULE,
James Smart3ef6d242012-01-18 16:23:48 -050014092};
14093
14094static struct miscdevice lpfc_mgmt_dev = {
14095 .minor = MISC_DYNAMIC_MINOR,
14096 .name = "lpfcmgmt",
14097 .fops = &lpfc_mgmt_fop,
14098};
14099
James Smarte59058c2008-08-24 21:49:00 -040014100/**
James Smart3621a712009-04-06 18:47:14 -040014101 * lpfc_init - lpfc module initialization routine
James Smarte59058c2008-08-24 21:49:00 -040014102 *
14103 * This routine is to be invoked when the lpfc module is loaded into the
14104 * kernel. The special kernel macro module_init() is used to indicate the
14105 * role of this routine to the kernel as lpfc module entry point.
14106 *
14107 * Return codes
14108 * 0 - successful
14109 * -ENOMEM - FC attach transport failed
14110 * all others - failed
14111 */
dea31012005-04-17 16:05:31 -050014112static int __init
14113lpfc_init(void)
14114{
14115 int error = 0;
14116
Anton Blanchardbc2736e2020-07-13 18:39:08 +100014117 pr_info(LPFC_MODULE_DESC "\n");
14118 pr_info(LPFC_COPYRIGHT "\n");
dea31012005-04-17 16:05:31 -050014119
James Smart3ef6d242012-01-18 16:23:48 -050014120 error = misc_register(&lpfc_mgmt_dev);
14121 if (error)
14122 printk(KERN_ERR "Could not register lpfcmgmt device, "
14123 "misc_register returned with status %d", error);
14124
Jing Xiangfeng1eaff532020-07-31 14:56:39 +080014125 error = -ENOMEM;
James Smart458c0832016-07-06 12:36:07 -070014126 lpfc_transport_functions.vport_create = lpfc_vport_create;
14127 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
dea31012005-04-17 16:05:31 -050014128 lpfc_transport_template =
14129 fc_attach_transport(&lpfc_transport_functions);
James Smart7ee5d432007-10-27 13:37:17 -040014130 if (lpfc_transport_template == NULL)
Jing Xiangfeng1eaff532020-07-31 14:56:39 +080014131 goto unregister;
James Smart458c0832016-07-06 12:36:07 -070014132 lpfc_vport_transport_template =
14133 fc_attach_transport(&lpfc_vport_transport_functions);
14134 if (lpfc_vport_transport_template == NULL) {
14135 fc_release_transport(lpfc_transport_template);
Jing Xiangfeng1eaff532020-07-31 14:56:39 +080014136 goto unregister;
James Smart7ee5d432007-10-27 13:37:17 -040014137 }
James Smart840a4702020-11-15 11:26:40 -080014138 lpfc_wqe_cmd_template();
James Smartbd3061b2018-03-05 12:04:05 -080014139 lpfc_nvmet_cmd_template();
James Smart7bb03bb2013-04-17 20:19:16 -040014140
14141 /* Initialize in case vector mapping is needed */
James Smart2ea259e2017-02-12 13:52:27 -080014142 lpfc_present_cpu = num_present_cpus();
James Smart7bb03bb2013-04-17 20:19:16 -040014143
James Smart93a4d6f2019-11-04 16:57:05 -080014144 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
14145 "lpfc/sli4:online",
14146 lpfc_cpu_online, lpfc_cpu_offline);
14147 if (error < 0)
14148 goto cpuhp_failure;
14149 lpfc_cpuhp_state = error;
14150
dea31012005-04-17 16:05:31 -050014151 error = pci_register_driver(&lpfc_driver);
James Smart93a4d6f2019-11-04 16:57:05 -080014152 if (error)
14153 goto unwind;
14154
14155 return error;
14156
14157unwind:
14158 cpuhp_remove_multi_state(lpfc_cpuhp_state);
14159cpuhp_failure:
14160 fc_release_transport(lpfc_transport_template);
14161 fc_release_transport(lpfc_vport_transport_template);
Jing Xiangfeng1eaff532020-07-31 14:56:39 +080014162unregister:
14163 misc_deregister(&lpfc_mgmt_dev);
dea31012005-04-17 16:05:31 -050014164
14165 return error;
14166}
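
/*
 * Error-unwind note (informational only): the labels above unwind in the
 * reverse order of setup and fall through to one another: "unwind" removes
 * the cpuhp multi state, "cpuhp_failure" releases both FC transport
 * templates, and "unregister" deregisters the lpfcmgmt misc device.
 */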
14167
Dick Kennedy372c1872020-06-30 14:50:00 -070014168void lpfc_dmp_dbg(struct lpfc_hba *phba)
14169{
14170 unsigned int start_idx;
14171 unsigned int dbg_cnt;
14172 unsigned int temp_idx;
14173 int i;
14174 int j = 0;
James Smarte8613082021-07-07 11:43:33 -070014175 unsigned long rem_nsec, iflags;
14176 bool log_verbose = false;
14177 struct lpfc_vport *port_iterator;
Dick Kennedy372c1872020-06-30 14:50:00 -070014178
James Smart0b3ad322021-01-04 10:02:39 -080014179 /* Don't dump messages if we explicitly set log_verbose for the
14180 * physical port or any vport.
14181 */
Dick Kennedy372c1872020-06-30 14:50:00 -070014182 if (phba->cfg_log_verbose)
14183 return;
14184
James Smarte8613082021-07-07 11:43:33 -070014185 spin_lock_irqsave(&phba->port_list_lock, iflags);
14186 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
14187 if (port_iterator->load_flag & FC_UNLOADING)
14188 continue;
14189 if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
14190 if (port_iterator->cfg_log_verbose)
14191 log_verbose = true;
14192
14193 scsi_host_put(lpfc_shost_from_vport(port_iterator));
14194
14195 if (log_verbose) {
14196 spin_unlock_irqrestore(&phba->port_list_lock,
14197 iflags);
James Smart0b3ad322021-01-04 10:02:39 -080014198 return;
14199 }
14200 }
14201 }
James Smarte8613082021-07-07 11:43:33 -070014202 spin_unlock_irqrestore(&phba->port_list_lock, iflags);
James Smart0b3ad322021-01-04 10:02:39 -080014203
Dick Kennedy372c1872020-06-30 14:50:00 -070014204 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
14205 return;
14206
14207 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
14208 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
James Smart0b3ad322021-01-04 10:02:39 -080014209 if (!dbg_cnt)
14210 goto out;
Dick Kennedy372c1872020-06-30 14:50:00 -070014211 temp_idx = start_idx;
14212 if (dbg_cnt >= DBG_LOG_SZ) {
14213 dbg_cnt = DBG_LOG_SZ;
14214 temp_idx -= 1;
14215 } else {
14216 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
14217 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
14218 } else {
Dick Kennedy77dd7d72020-07-06 13:42:46 -070014219 if (start_idx < dbg_cnt)
Dick Kennedy372c1872020-06-30 14:50:00 -070014220 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
Dick Kennedy77dd7d72020-07-06 13:42:46 -070014221 else
Dick Kennedy372c1872020-06-30 14:50:00 -070014222 start_idx -= dbg_cnt;
Dick Kennedy372c1872020-06-30 14:50:00 -070014223 }
14224 }
14225 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
14226 start_idx, temp_idx, dbg_cnt);
14227
14228 for (i = 0; i < dbg_cnt; i++) {
14229 if ((start_idx + i) < DBG_LOG_SZ)
Dick Kennedy77dd7d72020-07-06 13:42:46 -070014230 temp_idx = (start_idx + i) % DBG_LOG_SZ;
Dick Kennedy372c1872020-06-30 14:50:00 -070014231 else
14232 temp_idx = j++;
14233 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
14234 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
14235 temp_idx,
14236 (unsigned long)phba->dbg_log[temp_idx].t_ns,
14237 rem_nsec / 1000,
14238 phba->dbg_log[temp_idx].log);
14239 }
James Smart0b3ad322021-01-04 10:02:39 -080014240out:
Dick Kennedy372c1872020-06-30 14:50:00 -070014241 atomic_set(&phba->dbg_log_cnt, 0);
14242 atomic_set(&phba->dbg_log_dmping, 0);
14243}
14244
Lee Jones7fa03c72020-07-23 13:24:21 +010014245__printf(2, 3)
Dick Kennedy372c1872020-06-30 14:50:00 -070014246void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
14247{
14248 unsigned int idx;
14249 va_list args;
14250 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
14251 struct va_format vaf;
14252
14253
14254 va_start(args, fmt);
14255 if (unlikely(dbg_dmping)) {
14256 vaf.fmt = fmt;
14257 vaf.va = &args;
14258 dev_info(&phba->pcidev->dev, "%pV", &vaf);
14259 va_end(args);
14260 return;
14261 }
14262 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
14263 DBG_LOG_SZ;
14264
14265 atomic_inc(&phba->dbg_log_cnt);
14266
14267 vscnprintf(phba->dbg_log[idx].log,
14268 sizeof(phba->dbg_log[idx].log), fmt, args);
14269 va_end(args);
14270
14271 phba->dbg_log[idx].t_ns = local_clock();
14272}
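
/*
 * Illustrative usage sketch (hypothetical call site, not taken from this
 * file): callers log printf-style messages into the lockless ring buffer,
 * which lpfc_dmp_dbg() flushes to the console only when verbose logging is
 * disabled, e.g.:
 *
 *	lpfc_dbg_print(phba, "1234 queue setup failed rc x%x\n", rc);
 */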
14273
James Smarte59058c2008-08-24 21:49:00 -040014274/**
James Smart3621a712009-04-06 18:47:14 -040014275 * lpfc_exit - lpfc module removal routine
James Smarte59058c2008-08-24 21:49:00 -040014276 *
14277 * This routine is invoked when the lpfc module is removed from the kernel.
14278 * The special kernel macro module_exit() is used to indicate the role of
14279 * this routine to the kernel as lpfc module exit point.
14280 */
dea31012005-04-17 16:05:31 -050014281static void __exit
14282lpfc_exit(void)
14283{
James Smart3ef6d242012-01-18 16:23:48 -050014284 misc_deregister(&lpfc_mgmt_dev);
dea31012005-04-17 16:05:31 -050014285 pci_unregister_driver(&lpfc_driver);
James Smart93a4d6f2019-11-04 16:57:05 -080014286 cpuhp_remove_multi_state(lpfc_cpuhp_state);
dea31012005-04-17 16:05:31 -050014287 fc_release_transport(lpfc_transport_template);
James Smart458c0832016-07-06 12:36:07 -070014288 fc_release_transport(lpfc_vport_transport_template);
Johannes Thumshirn79739672015-08-31 16:48:11 -040014289 idr_destroy(&lpfc_hba_index);
dea31012005-04-17 16:05:31 -050014290}
14291
14292module_init(lpfc_init);
14293module_exit(lpfc_exit);
14294MODULE_LICENSE("GPL");
14295MODULE_DESCRIPTION(LPFC_MODULE_DESC);
James Smartd080abe2017-02-12 13:52:39 -080014296MODULE_AUTHOR("Broadcom");
dea31012005-04-17 16:05:31 -050014297MODULE_VERSION("0:" LPFC_DRIVER_VERSION);