/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <scsi/libfc.h>
#include <scsi/scsi_host.h>
#include <scsi/fc_frame.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include "qedf.h"
#include "qedf_dbg.h"
#include <uapi/linux/pci_regs.h>

const struct qed_fcoe_ops *qed_ops;

static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);

/*
 * Driver module parameters.
 */
static unsigned int qedf_dev_loss_tmo = 60;
module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
    "remote ports (default 60)");

uint qedf_debug = QEDF_LOG_INFO;
module_param_named(debug, qedf_debug, uint, S_IRUGO);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
    " mask");

static uint qedf_fipvlan_retries = 60;
module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
    "before giving up (default 60)");

static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
    "(default 1002).");

static int qedf_default_prio = -1;
module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
    " traffic (value between 0 and 7, default 3).");

uint qedf_dump_frames;
module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
    "(default off)");

static uint qedf_queue_depth;
module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
    "by the qedf driver. Default is 0 (use OS default).");

uint qedf_io_tracing;
module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
    "into trace buffer. (default off).");

static uint qedf_max_lun = MAX_FIBRE_LUNS;
module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
    "supports. (default 0xffffffff)");

uint qedf_link_down_tmo;
module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
    "link is down by N seconds.");

bool qedf_retry_delay;
module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry "
    "delay handling (default off).");

static bool qedf_dcbx_no_wait;
module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
    "sending FIP VLAN requests on link up (Default: off).");

static uint qedf_dp_module;
module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed "
    "qed module during probe.");

static uint qedf_dp_level = QED_LEVEL_NOTICE;
module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
    "during probe (0-3; 0 is the most verbose).");

struct workqueue_struct *qedf_io_wq;

static struct fcoe_percpu_s qedf_global;
static DEFINE_SPINLOCK(qedf_global_lock);

static struct kmem_cache *qedf_io_work_cache;

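/*
 * Store the discovered VLAN ID and fold the current 802.1q priority into the
 * stored tag. In the 802.1Q TCI the PCP field occupies the top three bits,
 * which is what VLAN_PRIO_SHIFT (13) selects.
 */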
void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
{
        qedf->vlan_id = vlan_id;
        qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
            "prio=%d.\n", vlan_id, qedf->prio);
}

/* Returns true if we have a valid vlan, false otherwise */
static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
        int rc;

        if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
                QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
                return false;
        }

        while (qedf->fipvlan_retries--) {
                if (qedf->vlan_id > 0)
                        return true;
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                    "Retry %d.\n", qedf->fipvlan_retries);
                init_completion(&qedf->fipvlan_compl);
                qedf_fcoe_send_vlan_req(qedf);
                rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
                    1 * HZ);
                if (rc > 0) {
                        fcoe_ctlr_link_up(&qedf->ctlr);
                        return true;
                }
        }

        return false;
}

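/*
 * Deferred work run on link-state changes. On link up it retries FIP VLAN
 * discovery (falling back to the fallback VLAN if discovery never answers);
 * on link down it informs libfcoe and waits for all sessions to upload.
 */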
static void qedf_handle_link_update(struct work_struct *work)
{
        struct qedf_ctx *qedf =
            container_of(work, struct qedf_ctx, link_update.work);
        int rc;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");

        if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
                rc = qedf_initiate_fipvlan_req(qedf);
                if (rc)
                        return;
                /*
                 * If we get here then we never received a response to our
                 * FIP VLAN request, so set the VLAN ID to the default and
                 * tell FCoE that the link is up.
                 */
                QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
                    "response, falling back to default VLAN %d.\n",
                    qedf_fallback_vlan);
                qedf_set_vlan_id(qedf, qedf_fallback_vlan);

                /*
                 * Zero out data_src_addr so we'll update it with the new
                 * lport port_id
                 */
                eth_zero_addr(qedf->data_src_addr);
                fcoe_ctlr_link_up(&qedf->ctlr);
        } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
                /*
                 * If we hit here and link_down_tmo_valid is still 1 it means
                 * that link_down_tmo timed out so set it to 0 to make sure any
                 * other readers have accurate state.
                 */
                atomic_set(&qedf->link_down_tmo_valid, 0);
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                    "Calling fcoe_ctlr_link_down().\n");
                fcoe_ctlr_link_down(&qedf->ctlr);
                qedf_wait_for_upload(qedf);
                /* Reset the number of FIP VLAN retries */
                qedf->fipvlan_retries = qedf_fipvlan_retries;
        }
}

#define QEDF_FCOE_MAC_METHOD_GRANGED_MAC        1
#define QEDF_FCOE_MAC_METHOD_FCF_MAP            2
#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC       3
static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
{
        u8 *granted_mac;
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        u8 fc_map[3];
        int method = 0;

        /* Get granted MAC address from FIP FLOGI payload */
        granted_mac = fr_cb(fp)->granted_mac;

        /*
         * We set the source MAC for FCoE traffic based on the Granted MAC
         * address from the switch.
         *
         * If granted_mac is non-zero, we use that.
         * If granted_mac is zeroed out, create the FCoE MAC from
         * sel_fcf->fc_map and the d_id of the FLOGI frame.
         * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
         * d_id of the FLOGI frame.
         */
        if (!is_zero_ether_addr(granted_mac)) {
                ether_addr_copy(qedf->data_src_addr, granted_mac);
                method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC;
        } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
                hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
                qedf->data_src_addr[0] = fc_map[0];
                qedf->data_src_addr[1] = fc_map[1];
                qedf->data_src_addr[2] = fc_map[2];
                qedf->data_src_addr[3] = fh->fh_d_id[0];
                qedf->data_src_addr[4] = fh->fh_d_id[1];
                qedf->data_src_addr[5] = fh->fh_d_id[2];
                method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
        } else {
                fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
                method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
            "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
}

static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
        void *arg)
{
        struct fc_exch *exch = fc_seq_exch(seq);
        struct fc_lport *lport = exch->lp;
        struct qedf_ctx *qedf = lport_priv(lport);

        if (!qedf) {
                QEDF_ERR(NULL, "qedf is NULL.\n");
                return;
        }

        /*
         * If ERR_PTR is set then don't try to stat anything as it will cause
         * a crash when we access fp.
         */
        if (IS_ERR(fp)) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "fp has IS_ERR() set.\n");
                goto skip_stat;
        }

        /* Log stats for FLOGI reject */
        if (fc_frame_payload_op(fp) == ELS_LS_RJT)
                qedf->flogi_failed++;
        else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
                /* Set the source MAC we will use for FCoE traffic */
                qedf_set_data_src_addr(qedf, fp);
        }

        /* Complete flogi_compl so we can proceed to sending ADISCs */
        complete(&qedf->flogi_compl);

skip_stat:
        /* Report response to libfc */
        fc_lport_flogi_resp(seq, fp, lport);
}

static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
        struct fc_frame *fp, unsigned int op,
        void (*resp)(struct fc_seq *,
        struct fc_frame *,
        void *),
        void *arg, u32 timeout)
{
        struct qedf_ctx *qedf = lport_priv(lport);

        /*
         * Intercept FLOGI for statistic purposes. Note we use the resp
         * callback to tell if this is really a flogi.
         */
        if (resp == fc_lport_flogi_resp) {
                qedf->flogi_cnt++;
                return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
                    arg, timeout);
        }

        return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}

int qedf_send_flogi(struct qedf_ctx *qedf)
{
        struct fc_lport *lport;
        struct fc_frame *fp;

        lport = qedf->lport;

        if (!lport->tt.elsct_send)
                return -EINVAL;

        fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
                return -ENOMEM;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Sending FLOGI to reestablish session with switch.\n");
        lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
            ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);

        init_completion(&qedf->flogi_compl);

        return 0;
}

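/*
 * Scratch list node used by qedf_link_recovery() to hold rport references
 * taken under the RCU read lock, so that fc_rport_login() (which may sleep)
 * can be called after the lock is dropped.
 */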
struct qedf_tmp_rdata_item {
        struct fc_rport_priv *rdata;
        struct list_head list;
};

/*
 * This function is called if link_down_tmo is in use. If we get a link up and
 * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
 * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
 */
static void qedf_link_recovery(struct work_struct *work)
{
        struct qedf_ctx *qedf =
            container_of(work, struct qedf_ctx, link_recovery.work);
        struct qedf_rport *fcport;
        struct fc_rport_priv *rdata;
        struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
        bool rc;
        int retries = 30;
        int rval, i;
        struct list_head rdata_login_list;

        INIT_LIST_HEAD(&rdata_login_list);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
            "Link down tmo did not expire.\n");

        /*
         * Essentially reset the fcoe_ctlr here without affecting the state
         * of the libfc structs.
         */
        qedf->ctlr.state = FIP_ST_LINK_WAIT;
        fcoe_ctlr_link_down(&qedf->ctlr);

        /*
         * Bring the link up before we send the fipvlan request so libfcoe
         * can select a new fcf in parallel
         */
        fcoe_ctlr_link_up(&qedf->ctlr);

        /* Since the link went down and came back up, verify which VLAN we're on */
        qedf->fipvlan_retries = qedf_fipvlan_retries;
        rc = qedf_initiate_fipvlan_req(qedf);
        /* If getting the VLAN fails, set the VLAN to the fallback one */
        if (!rc)
                qedf_set_vlan_id(qedf, qedf_fallback_vlan);

        /*
         * We need to wait for an FCF to be selected due to the
         * fcoe_ctlr_link_up, otherwise the FLOGI will be rejected.
         */
        while (retries > 0) {
                if (qedf->ctlr.sel_fcf) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "FCF reselected, proceeding with FLOGI.\n");
                        break;
                }
                msleep(500);
                retries--;
        }

        if (retries < 1) {
                QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
                    "FCF selection.\n");
                return;
        }

        rval = qedf_send_flogi(qedf);
        if (rval)
                return;

        /* Wait for FLOGI completion before proceeding with sending ADISCs */
        i = wait_for_completion_timeout(&qedf->flogi_compl,
            qedf->lport->r_a_tov);
        if (i == 0) {
                QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
                return;
        }

        /*
         * Call lport->tt.rport_login which will cause libfc to send an
         * ADISC since the rport is in state ready.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
                rdata = fcport->rdata;
                if (rdata == NULL)
                        continue;
                rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
                    GFP_ATOMIC);
                if (!rdata_item)
                        continue;
                if (kref_get_unless_zero(&rdata->kref)) {
                        rdata_item->rdata = rdata;
                        list_add(&rdata_item->list, &rdata_login_list);
                } else
                        kfree(rdata_item);
        }
        rcu_read_unlock();
        /*
         * Do the fc_rport_login outside of the rcu lock so we don't take a
         * mutex in an atomic context.
         */
        list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
            list) {
                list_del(&rdata_item->list);
                fc_rport_login(rdata_item->rdata);
                kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
                kfree(rdata_item);
        }
}

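/*
 * Translate the qed link speed and supported-capability bits into the FC
 * transport's port-speed flags so fc_host reports accurate values.
 */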
static void qedf_update_link_speed(struct qedf_ctx *qedf,
        struct qed_link_output *link)
{
        struct fc_lport *lport = qedf->lport;

        lport->link_speed = FC_PORTSPEED_UNKNOWN;
        lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;

        /* Set fc_host link speed */
        switch (link->speed) {
        case 10000:
                lport->link_speed = FC_PORTSPEED_10GBIT;
                break;
        case 25000:
                lport->link_speed = FC_PORTSPEED_25GBIT;
                break;
        case 40000:
                lport->link_speed = FC_PORTSPEED_40GBIT;
                break;
        case 50000:
                lport->link_speed = FC_PORTSPEED_50GBIT;
                break;
        case 100000:
                lport->link_speed = FC_PORTSPEED_100GBIT;
                break;
        default:
                lport->link_speed = FC_PORTSPEED_UNKNOWN;
                break;
        }

        /*
         * Set supported link speed by querying the supported
         * capabilities of the link.
         */
        if (link->supported_caps & SUPPORTED_10000baseKR_Full)
                lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
        if (link->supported_caps & SUPPORTED_25000baseKR_Full)
                lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
        if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
                lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
        if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
                lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
        if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
                lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
        fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
}

static void qedf_link_update(void *dev, struct qed_link_output *link)
{
        struct qedf_ctx *qedf = (struct qedf_ctx *)dev;

        if (link->link_up) {
                if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
                        QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
                            "Ignoring link up event as link is already up.\n");
                        return;
                }
                QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
                    link->speed / 1000);

                /* Cancel any pending link down work */
                cancel_delayed_work(&qedf->link_update);

                atomic_set(&qedf->link_state, QEDF_LINK_UP);
                qedf_update_link_speed(qedf, link);

                if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
                    qedf_dcbx_no_wait) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "DCBx done.\n");
                        if (atomic_read(&qedf->link_down_tmo_valid) > 0)
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_recovery, 0);
                        else
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_update, 0);
                        atomic_set(&qedf->link_down_tmo_valid, 0);
                }

        } else {
                QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");

                atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
                atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
                /*
                 * Flag that we're waiting for the link to come back up before
                 * informing the fcoe layer of the event.
                 */
                if (qedf_link_down_tmo > 0) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "Starting link down tmo.\n");
                        atomic_set(&qedf->link_down_tmo_valid, 1);
                }
                qedf->vlan_id = 0;
                qedf_update_link_speed(qedf, link);
                queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
                    qedf_link_down_tmo * HZ);
        }
}


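/*
 * qed DCBX MIB-change callback. Once the operational DCBX parameters are
 * valid, pick the 802.1q priority for FIP/FCoE traffic and kick off the
 * deferred link-up work that was held back waiting for DCBX convergence.
 */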
static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
{
        struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
        u8 tmp_prio;

        QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
            "prio=%d.\n", get->operational.valid, get->operational.enabled,
            get->operational.app_prio.fcoe);

        if (get->operational.enabled && get->operational.valid) {
                /* If DCBX was already negotiated on link up then just exit */
                if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "DCBX already set on link up.\n");
                        return;
                }

                atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);

                /*
                 * Set the 8021q priority in the following manner:
                 *
                 * 1. If a modparam is set use that
                 * 2. If the value is not between 0..7 use the default
                 * 3. Use the priority we get from the DCBX app tag
                 */
                tmp_prio = get->operational.app_prio.fcoe;
                if (qedf_default_prio > -1)
                        qedf->prio = qedf_default_prio;
                else if (tmp_prio < 0 || tmp_prio > 7) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "FIP/FCoE prio %d out of range, setting to %d.\n",
                            tmp_prio, QEDF_DEFAULT_PRIO);
                        qedf->prio = QEDF_DEFAULT_PRIO;
                } else
                        qedf->prio = tmp_prio;

                if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
                    !qedf_dcbx_no_wait) {
                        if (atomic_read(&qedf->link_down_tmo_valid) > 0)
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_recovery, 0);
                        else
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_update, 0);
                        atomic_set(&qedf->link_down_tmo_valid, 0);
                }
        }

}

static u32 qedf_get_login_failures(void *cookie)
{
        struct qedf_ctx *qedf;

        qedf = (struct qedf_ctx *)cookie;
        return qedf->flogi_failed;
}

static struct qed_fcoe_cb_ops qedf_cb_ops = {
        {
                .link_update = qedf_link_update,
                .dcbx_aen = qedf_dcbx_handler,
                .get_generic_tlv_data = qedf_get_generic_tlv_data,
                .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
        }
};

/*
 * Various transport templates.
 */

static struct scsi_transport_template *qedf_fc_transport_template;
static struct scsi_transport_template *qedf_fc_vport_transport_template;

/*
 * SCSI EH handlers
 */
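/*
 * Abort a single SCSI command. Takes its own references on the rport data
 * and the io_req, validates that the command still belongs to the driver,
 * then issues an ABTS and waits for the firmware to confirm the abort.
 */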
static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
{
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        struct qedf_ioreq *io_req;
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct fc_rport_priv *rdata;
        struct qedf_rport *fcport = NULL;
        int rc = FAILED;
        int wait_count = 100;
        int refcount = 0;
        int rval;
        int got_ref = 0;

        lport = shost_priv(sc_cmd->device->host);
        qedf = (struct qedf_ctx *)lport_priv(lport);

        /* rport and fcport are allocated together, so fcport should be non-NULL */
        fcport = (struct qedf_rport *)&rp[1];
        rdata = fcport->rdata;
        if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
                QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
                rc = 1;
                goto out;
        }


        io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
        if (!io_req) {
                QEDF_ERR(&qedf->dbg_ctx,
                    "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
                    sc_cmd, sc_cmd->cmnd[0],
                    rdata->ids.port_id);
                rc = SUCCESS;
                goto drop_rdata_kref;
        }

        rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
        if (rval)
                got_ref = 1;

        /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
        if (!rval || io_req->sc_cmd != sc_cmd) {
                QEDF_ERR(&qedf->dbg_ctx,
                    "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
                    io_req->sc_cmd, sc_cmd, rdata->ids.port_id);

                goto drop_rdata_kref;
        }

        if (fc_remote_port_chkready(rport)) {
                refcount = kref_read(&io_req->refcount);
                QEDF_ERR(&qedf->dbg_ctx,
                    "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
                    io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
                    refcount, rdata->ids.port_id);

                goto drop_rdata_kref;
        }

        rc = fc_block_scsi_eh(sc_cmd);
        if (rc)
                goto drop_rdata_kref;

        if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&qedf->dbg_ctx,
                    "Connection uploading, xid=0x%x., port_id=%06x\n",
                    io_req->xid, rdata->ids.port_id);
                while (io_req->sc_cmd && (wait_count != 0)) {
                        msleep(100);
                        wait_count--;
                }
                if (wait_count) {
                        QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
                        rc = SUCCESS;
                } else {
                        QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
                        rc = FAILED;
                }
                goto drop_rdata_kref;
        }

        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
                goto drop_rdata_kref;
        }

        QEDF_ERR(&qedf->dbg_ctx,
            "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
            io_req, sc_cmd, io_req->xid, io_req->fp_idx,
            rdata->ids.port_id);

        if (qedf->stop_io_on_error) {
                qedf_stop_all_io(qedf);
                rc = SUCCESS;
                goto drop_rdata_kref;
        }

        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval) {
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
                /*
                 * If we fail to queue the ABTS then return this command to
                 * the SCSI layer as it will own and free the xid
                 */
                rc = SUCCESS;
                qedf_scsi_done(qedf, io_req, DID_ERROR);
                goto drop_rdata_kref;
        }

        wait_for_completion(&io_req->abts_done);

        if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
            io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
            io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
                /*
                 * If we get a response to the abort this is success from
                 * the perspective that all references to the command have
                 * been removed from the driver and firmware
                 */
                rc = SUCCESS;
        } else {
                /* If the abort and cleanup failed then return a failure */
                rc = FAILED;
        }

        if (rc == SUCCESS)
                QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
                    io_req->xid);
        else
                QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
                    io_req->xid);

drop_rdata_kref:
        kref_put(&rdata->kref, fc_rport_destroy);
out:
        if (got_ref)
                kref_put(&io_req->refcount, qedf_release_cmd);
        return rc;
}

static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
        QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
            sc_cmd->device->host->host_no, sc_cmd->device->id,
            sc_cmd->device->lun);
        return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
        QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
            sc_cmd->device->host->host_no, sc_cmd->device->id,
            sc_cmd->device->lun);
        return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

void qedf_wait_for_upload(struct qedf_ctx *qedf)
{
        while (1) {
                if (atomic_read(&qedf->num_offloads))
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "Waiting for all uploads to complete.\n");
                else
                        break;
                msleep(500);
        }
}

/* Performs soft reset of qedf_ctx by simulating a link down/up */
static void qedf_ctx_soft_reset(struct fc_lport *lport)
{
        struct qedf_ctx *qedf;

        if (lport->vport) {
                QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
                return;
        }

        qedf = lport_priv(lport);

        /* For host reset, essentially do a soft link up/down */
        atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
        queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
            0);
        qedf_wait_for_upload(qedf);
        atomic_set(&qedf->link_state, QEDF_LINK_UP);
        qedf->vlan_id = 0;
        queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
            0);
}

/* Reset the host by gracefully logging out and then logging back in */
static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
        struct fc_lport *lport;
        struct qedf_ctx *qedf;

        lport = shost_priv(sc_cmd->device->host);
        qedf = lport_priv(lport);

        if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
            test_bit(QEDF_UNLOADING, &qedf->flags))
                return FAILED;

        QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");

        qedf_ctx_soft_reset(lport);

        return SUCCESS;
}

static int qedf_slave_configure(struct scsi_device *sdev)
{
        if (qedf_queue_depth) {
                scsi_change_queue_depth(sdev, qedf_queue_depth);
        }

        return 0;
}

static struct scsi_host_template qedf_host_template = {
        .module = THIS_MODULE,
        .name = QEDF_MODULE_NAME,
        .this_id = -1,
        .cmd_per_lun = 32,
        .max_sectors = 0xffff,
        .queuecommand = qedf_queuecommand,
        .shost_attrs = qedf_host_attrs,
        .eh_abort_handler = qedf_eh_abort,
        .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
        .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
        .eh_host_reset_handler = qedf_eh_host_reset,
        .slave_configure = qedf_slave_configure,
        .dma_boundary = QED_HW_DMA_BOUNDARY,
        .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
        .can_queue = FCOE_PARAMS_NUM_TASKS,
        .change_queue_depth = scsi_change_queue_depth,
};

static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
        int rc;

        spin_lock(&qedf_global_lock);
        rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
        spin_unlock(&qedf_global_lock);

        return rc;
}

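/* Find the offloaded session, if any, for a destination FC-ID (port_id). */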
static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
{
        struct qedf_rport *fcport;
        struct fc_rport_priv *rdata;

        rcu_read_lock();
        list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
                rdata = fcport->rdata;
                if (rdata == NULL)
                        continue;
                if (rdata->ids.port_id == port_id) {
                        rcu_read_unlock();
                        return fcport;
                }
        }
        rcu_read_unlock();

        /* Return NULL to caller to let them know fcport was not found */
        return NULL;
}

/* Transmits an ELS frame over an offloaded session */
static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
{
        struct fc_frame_header *fh;
        int rc = 0;

        fh = fc_frame_header_get(fp);
        if ((fh->fh_type == FC_TYPE_ELS) &&
            (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
                switch (fc_frame_payload_op(fp)) {
                case ELS_ADISC:
                        qedf_send_adisc(fcport, fp);
                        rc = 1;
                        break;
                }
        }

        return rc;
}

/**
 * qedf_xmit - qedf FCoE frame transmit function
 *
 */
static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
        struct fc_lport *base_lport;
        struct qedf_ctx *qedf;
        struct ethhdr *eh;
        struct fcoe_crc_eof *cp;
        struct sk_buff *skb;
        struct fc_frame_header *fh;
        struct fcoe_hdr *hp;
        u8 sof, eof;
        u32 crc;
        unsigned int hlen, tlen, elen;
        int wlen;
        struct fc_stats *stats;
        struct fc_lport *tmp_lport;
        struct fc_lport *vn_port = NULL;
        struct qedf_rport *fcport;
        int rc;
        u16 vlan_tci = 0;

        qedf = (struct qedf_ctx *)lport_priv(lport);

        fh = fc_frame_header_get(fp);
        skb = fp_skb(fp);

        /* Filter out traffic to other NPIV ports on the same host */
        if (lport->vport)
                base_lport = shost_priv(vport_to_shost(lport->vport));
        else
                base_lport = lport;

        /* Flag if the destination is the base port */
        if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
                vn_port = base_lport;
        } else {
                /* Go through the list of vports attached to the base_lport
                 * and see if we have a match with the destination address.
                 */
                list_for_each_entry(tmp_lport, &base_lport->vports, list) {
                        if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
                                vn_port = tmp_lport;
                                break;
                        }
                }
        }
        if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
                struct fc_rport_priv *rdata = NULL;

                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
                    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
                kfree_skb(skb);
                rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
                if (rdata)
                        rdata->retries = lport->max_rport_retry_count;
                return -EINVAL;
        }
        /* End NPIV filtering */

        if (!qedf->ctlr.sel_fcf) {
                kfree_skb(skb);
                return 0;
        }

        if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
                QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
                kfree_skb(skb);
                return 0;
        }

        if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
                QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
                kfree_skb(skb);
                return 0;
        }

        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
                if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
                        return 0;
        }

        /* Check to see if this needs to be sent on an offloaded session */
        fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));

        if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                rc = qedf_xmit_l2_frame(fcport, fp);
                /*
                 * If the frame was successfully sent over the middle path
                 * then do not try to also send it over the LL2 path
                 */
                if (rc)
                        return 0;
        }

        sof = fr_sof(fp);
        eof = fr_eof(fp);

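        /*
         * Compute the encapsulation overhead: an Ethernet header and an FCoE
         * header in front of the FC frame, and a CRC/EOF trailer behind it.
         * wlen is the frame length in 32-bit words for the transmit stats.
         */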
        elen = sizeof(struct ethhdr);
        hlen = sizeof(struct fcoe_hdr);
        tlen = sizeof(struct fcoe_crc_eof);
        wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

        skb->ip_summed = CHECKSUM_NONE;
        crc = fcoe_fc_crc(fp);

        /* copy port crc and eof to the skb buff */
        if (skb_is_nonlinear(skb)) {
                skb_frag_t *frag;

                if (qedf_get_paged_crc_eof(skb, tlen)) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
                cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
        } else {
                cp = skb_put(skb, tlen);
        }

        memset(cp, 0, sizeof(*cp));
        cp->fcoe_eof = eof;
        cp->fcoe_crc32 = cpu_to_le32(~crc);
        if (skb_is_nonlinear(skb)) {
                kunmap_atomic(cp);
                cp = NULL;
        }


        /* adjust skb network/transport offsets to match mac/fcoe/port */
        skb_push(skb, elen + hlen);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb->mac_len = elen;
        skb->protocol = htons(ETH_P_FCOE);

        /*
         * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
         * for FIP/FCoE traffic.
         */
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);

        /* fill up mac and fcoe headers */
        eh = eth_hdr(skb);
        eh->h_proto = htons(ETH_P_FCOE);
        if (qedf->ctlr.map_dest)
                fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
        else
                /* insert GW address */
                ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);

        /* Set the source MAC address */
        ether_addr_copy(eh->h_source, qedf->data_src_addr);

        hp = (struct fcoe_hdr *)(eh + 1);
        memset(hp, 0, sizeof(*hp));
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
        hp->fcoe_sof = sof;

        /* update tx stats */
        stats = per_cpu_ptr(lport->stats, get_cpu());
        stats->TxFrames++;
        stats->TxWords += wlen;
        put_cpu();

        /* Get VLAN ID from skb for printing purposes */
        __vlan_hwaccel_get_tag(skb, &vlan_tci);

        /* send down to lld */
        fr_dev(fp) = lport;
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
            "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
            ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
            vlan_tci);
        if (qedf_dump_frames)
                print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
                    1, skb->data, skb->len, false);
        qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);

        return 0;
}

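/*
 * Allocate the per-session send queue (SQ) and its page buffer list (PBL).
 * The PBL is an array of SQ page addresses, each stored as a low/high 32-bit
 * pair, that the firmware walks to locate the SQ pages.
 */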
static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
        int rval = 0;
        u32 *pbl;
        dma_addr_t page;
        int num_pages;

        /* Calculate appropriate queue and PBL sizes */
        fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
        fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
        fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
            sizeof(void *);
        fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;

        fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
            &fcport->sq_dma, GFP_KERNEL);
        if (!fcport->sq) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
                rval = 1;
                goto out;
        }

        fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
            fcport->sq_pbl_size,
            &fcport->sq_pbl_dma, GFP_KERNEL);
        if (!fcport->sq_pbl) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
                rval = 1;
                goto out_free_sq;
        }

        /* Create PBL */
        num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
        page = fcport->sq_dma;
        pbl = (u32 *)fcport->sq_pbl;

        while (num_pages--) {
                *pbl = U64_LO(page);
                pbl++;
                *pbl = U64_HI(page);
                pbl++;
                page += QEDF_PAGE_SIZE;
        }

        return rval;

out_free_sq:
        dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
            fcport->sq_dma);
out:
        return rval;
}

static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
        if (fcport->sq_pbl)
                dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
                    fcport->sq_pbl, fcport->sq_pbl_dma);
        if (fcport->sq)
                dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
                    fcport->sq, fcport->sq_dma);
}

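/*
 * Offload a session to the hardware: acquire a connection handle from qed,
 * describe the session (SQ PBL, MACs, FC-IDs, timers, VLAN) in conn_info,
 * then hand it to the firmware with offload_conn().
 */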
static int qedf_offload_connection(struct qedf_ctx *qedf,
        struct qedf_rport *fcport)
{
        struct qed_fcoe_params_offload conn_info;
        u32 port_id;
        int rval;
        uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
            "portid=%06x.\n", fcport->rdata->ids.port_id);
        rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
            &fcport->fw_cid, &fcport->p_doorbell);
        if (rval) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
                    "for portid=%06x.\n", fcport->rdata->ids.port_id);
                rval = 1; /* For some reason qed returns 0 on failure here */
                goto out;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
            "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
            fcport->fw_cid, fcport->handle);

        memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));

        /* Fill in the offload connection info */
        conn_info.sq_pbl_addr = fcport->sq_pbl_dma;

        conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
        conn_info.sq_next_page_addr =
            (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));

        /* Need to use our FCoE MAC for the offload session */
        ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);

        ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);

        conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
        conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
        conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
        conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;

        /* Set VLAN data */
        conn_info.vlan_tag = qedf->vlan_id <<
            FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
        conn_info.vlan_tag |=
            qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
        conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
            FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);

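        /*
         * The 24-bit FC-IDs below are split one byte per field; note that
         * addr_hi receives the low-order byte of the host-order port_id,
         * presumably because the firmware expects the ID in network order.
         */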
        /* Set host port source id */
        port_id = fc_host_port_id(qedf->lport->host);
        fcport->sid = port_id;
        conn_info.s_id.addr_hi = (port_id & 0x000000FF);
        conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
        conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;

        conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;

        /* Set remote port destination id */
        port_id = fcport->rdata->rport->port_id;
        conn_info.d_id.addr_hi = (port_id & 0x000000FF);
        conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
        conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;

        conn_info.def_q_idx = 0; /* Default index for send queue? */

        /* Set FC-TAPE specific flags if needed */
        if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
                    "Enable CONF, REC for portid=%06x.\n",
                    fcport->rdata->ids.port_id);
                conn_info.flags |= 1 <<
                    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
                conn_info.flags |=
                    ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
                    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
        }

        rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
        if (rval) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
                    "for portid=%06x.\n", fcport->rdata->ids.port_id);
                goto out_free_conn;
        } else
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
                    "succeeded portid=%06x total_sqe=%d.\n",
                    fcport->rdata->ids.port_id, total_sqe);

        spin_lock_init(&fcport->rport_lock);
        atomic_set(&fcport->free_sqes, total_sqe);
        return 0;
out_free_conn:
        qed_ops->release_conn(qedf->cdev, fcport->handle);
out:
        return rval;
}

#define QEDF_TERM_BUFF_SIZE     10
static void qedf_upload_connection(struct qedf_ctx *qedf,
        struct qedf_rport *fcport)
{
        void *term_params;
        dma_addr_t term_params_dma;

        /* The termination params buffer must be DMA coherent since qed shares
         * its physical DMA address with the firmware. The buffer may be used
         * in the receive path, so we may eventually have to move this.
         */
        term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
            &term_params_dma, GFP_KERNEL);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
            "port_id=%06x.\n", fcport->rdata->ids.port_id);

        qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
        qed_ops->release_conn(qedf->cdev, fcport->handle);

        dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
            term_params_dma);
}

static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
        struct qedf_rport *fcport)
{
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
            fcport->rdata->ids.port_id);

        /* Flush any remaining i/o's before we upload the connection */
        qedf_flush_active_ios(fcport, -1);

        if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
                qedf_upload_connection(qedf, fcport);
        qedf_free_sq(qedf, fcport);
        fcport->rdata = NULL;
        fcport->qedf = NULL;
}

/**
 * This event_callback is called after successful completion of libfc
 * initiated target login. qedf can proceed with initiating the session
 * establishment.
 */
static void qedf_rport_event_handler(struct fc_lport *lport,
        struct fc_rport_priv *rdata,
        enum fc_rport_event event)
{
        struct qedf_ctx *qedf = lport_priv(lport);
        struct fc_rport *rport = rdata->rport;
        struct fc_rport_libfc_priv *rp;
        struct qedf_rport *fcport;
        u32 port_id;
        int rval;
        unsigned long flags;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
            "port_id = 0x%x\n", event, rdata->ids.port_id);

        switch (event) {
        case RPORT_EV_READY:
                if (!rport) {
                        QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
                        break;
                }

                rp = rport->dd_data;
                fcport = (struct qedf_rport *)&rp[1];
                fcport->qedf = qedf;

                if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
                        QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
                            "portid=0x%x as max number of offloaded sessions "
                            "reached.\n", rdata->ids.port_id);
                        return;
                }

                /*
                 * Don't try to offload the session again. Can happen when we
                 * get an ADISC
                 */
                if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                        QEDF_WARN(&(qedf->dbg_ctx), "Session already "
                            "offloaded, portid=0x%x.\n",
                            rdata->ids.port_id);
                        return;
                }

                if (rport->port_id == FC_FID_DIR_SERV) {
                        /*
                         * qedf_rport structure doesn't exist for
                         * directory server.
                         * We should not come here, as lport will
                         * take care of fabric login
                         */
                        QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
                            "exist for dir server port_id=%x\n",
                            rdata->ids.port_id);
                        break;
                }

                if (rdata->spp_type != FC_TYPE_FCP) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "Not offloading since spp type isn't FCP\n");
                        break;
                }
                if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "Not FCP target so not offloading\n");
                        break;
                }

                fcport->rdata = rdata;
                fcport->rport = rport;

                rval = qedf_alloc_sq(qedf, fcport);
                if (rval) {
                        qedf_cleanup_fcport(qedf, fcport);
                        break;
                }

                /* Set device type */
                if (rdata->flags & FC_RP_FLAGS_RETRY &&
                    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
                    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
                        fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "portid=%06x is a TAPE device.\n",
                            rdata->ids.port_id);
                } else {
                        fcport->dev_type = QEDF_RPORT_TYPE_DISK;
                }

                rval = qedf_offload_connection(qedf, fcport);
                if (rval) {
                        qedf_cleanup_fcport(qedf, fcport);
                        break;
                }

                /* Add fcport to the qedf_ctx list of offloaded ports */
                spin_lock_irqsave(&qedf->hba_lock, flags);
                list_add_rcu(&fcport->peers, &qedf->fcports);
                spin_unlock_irqrestore(&qedf->hba_lock, flags);

                /*
                 * Set the session ready bit to let everyone know that this
                 * connection is ready for I/O
                 */
                set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
                atomic_inc(&qedf->num_offloads);

                break;
        case RPORT_EV_LOGO:
        case RPORT_EV_FAILED:
        case RPORT_EV_STOP:
                port_id = rdata->ids.port_id;
                if (port_id == FC_FID_DIR_SERV)
                        break;

                if (!rport) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1417 "port_id=%x - rport notcreated Yet!!\n", port_id);
                        break;
                }
                rp = rport->dd_data;
                /*
                 * Perform session upload. Note that rdata->peers is already
                 * removed from disc->rports list before we get this event.
                 */
                fcport = (struct qedf_rport *)&rp[1];

                /* Only free this fcport if it is offloaded already */
                if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
                    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
                    &fcport->flags)) {
                        set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
                            &fcport->flags);
                        qedf_cleanup_fcport(qedf, fcport);
                        /*
                         * Remove fcport from the qedf_ctx list of offloaded
                         * ports
                         */
                        spin_lock_irqsave(&qedf->hba_lock, flags);
                        list_del_rcu(&fcport->peers);
                        spin_unlock_irqrestore(&qedf->hba_lock, flags);

                        clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
                            &fcport->flags);
                        atomic_dec(&qedf->num_offloads);
                }

                break;

        case RPORT_EV_NONE:
                break;
        }
}

static void qedf_abort_io(struct fc_lport *lport)
{
        /* NO-OP but need to fill in the template */
}

static void qedf_fcp_cleanup(struct fc_lport *lport)
{
        /*
         * NO-OP but need to fill in template to prevent a NULL
         * function pointer dereference during link down. I/Os
         * will be flushed when port is uploaded.
         */
}

static struct libfc_function_template qedf_lport_template = {
        .frame_send = qedf_xmit,
        .fcp_abort_io = qedf_abort_io,
        .fcp_cleanup = qedf_fcp_cleanup,
        .rport_event_callback = qedf_rport_event_handler,
        .elsct_send = qedf_elsct_send,
};

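/* Initialize the libfcoe FIP controller in auto mode and wire up our
 * frame-send and source-MAC callbacks.
 */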
static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
{
        fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);

        qedf->ctlr.send = qedf_fip_send;
        qedf->ctlr.get_src_addr = qedf_get_src_mac;
        ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
}

static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
        struct fc_lport *lport = qedf->lport;
        struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
        u8 buf[8];
        int i, pos;

        /*
         * fdmi_enabled needs to be set for libfc to execute FDMI registration.
         */
        lport->fdmi_enabled = 1;

        /*
         * Setup the necessary fc_host attributes that will be used to fill
         * in the FDMI information.
         */

        /* Get the PCI-e Device Serial Number Capability */
        pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
        if (pos) {
                pos += 4;
                for (i = 0; i < 8; i++)
                        pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);

                snprintf(fc_host->serial_number,
                    sizeof(fc_host->serial_number),
                    "%02X%02X%02X%02X%02X%02X%02X%02X",
                    buf[7], buf[6], buf[5], buf[4],
                    buf[3], buf[2], buf[1], buf[0]);
        } else
                snprintf(fc_host->serial_number,
                    sizeof(fc_host->serial_number), "Unknown");

        snprintf(fc_host->manufacturer,
            sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");

        snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");

        snprintf(fc_host->model_description, sizeof(fc_host->model_description),
            "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GbE Controller "
            "(FCoE)");
1526
1527 snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
1528 "Rev %d", qedf->pdev->revision);
1529
1530 snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
1531 "%s", QEDF_VERSION);
1532
1533 snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
1534 "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
1535 FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
1536}
1537
Dupuis, Chad61d86582017-02-15 06:28:23 -08001538static int qedf_lport_setup(struct qedf_ctx *qedf)
1539{
1540 struct fc_lport *lport = qedf->lport;
1541
1542 lport->link_up = 0;
1543 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1544 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1545 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1546 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1547 lport->boot_time = jiffies;
1548 lport->e_d_tov = 2 * 1000;
1549 lport->r_a_tov = 10 * 1000;
1550
1551 /* Set NPIV support */
1552 lport->does_npiv = 1;
1553 fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
1554
1555 fc_set_wwnn(lport, qedf->wwnn);
1556 fc_set_wwpn(lport, qedf->wwpn);
1557
1558 fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
1559
1560 /* Allocate the exchange manager */
Chad Dupuis650ce642019-03-26 00:38:34 -07001561 fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
1562 0xfffe, NULL);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001563
1564 if (fc_lport_init_stats(lport))
1565 return -ENOMEM;
1566
1567 /* Finish lport config */
1568 fc_lport_config(lport);
1569
1570 /* Set max frame size */
1571 fc_set_mfs(lport, QEDF_MFS);
1572 fc_host_maxframe_size(lport->host) = lport->mfs;
1573
1574 /* Set default dev_loss_tmo based on module parameter */
1575 fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
1576
1577 /* Set symbolic node name */
1578 snprintf(fc_host_symbolic_name(lport->host), 256,
1579 "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
1580
Chad Dupuis5185b322017-05-31 06:33:48 -07001581 qedf_setup_fdmi(qedf);
1582
Dupuis, Chad61d86582017-02-15 06:28:23 -08001583 return 0;
1584}
1585
1586/*
1587 * NPIV functions
1588 */
1589
1590static int qedf_vport_libfc_config(struct fc_vport *vport,
1591 struct fc_lport *lport)
1592{
1593 lport->link_up = 0;
1594 lport->qfull = 0;
1595 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1596 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1597 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1598 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1599 lport->boot_time = jiffies;
1600 lport->e_d_tov = 2 * 1000;
1601 lport->r_a_tov = 10 * 1000;
1602 lport->does_npiv = 1; /* Temporary until we add NPIV support */
1603
1604 /* Allocate stats for vport */
1605 if (fc_lport_init_stats(lport))
1606 return -ENOMEM;
1607
1608 /* Finish lport config */
1609 fc_lport_config(lport);
1610
1611 /* offload related configuration */
1612 lport->crc_offload = 0;
1613 lport->seq_offload = 0;
1614 lport->lro_enabled = 0;
1615 lport->lro_xid = 0;
1616 lport->lso_max = 0;
1617
1618 return 0;
1619}
1620
1621static int qedf_vport_create(struct fc_vport *vport, bool disabled)
1622{
1623 struct Scsi_Host *shost = vport_to_shost(vport);
1624 struct fc_lport *n_port = shost_priv(shost);
1625 struct fc_lport *vn_port;
1626 struct qedf_ctx *base_qedf = lport_priv(n_port);
1627 struct qedf_ctx *vport_qedf;
1628
1629 char buf[32];
1630 int rc = 0;
1631
1632 rc = fcoe_validate_vport_create(vport);
1633 if (rc) {
1634 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1635 QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
1636 "WWPN (0x%s) already exists.\n", buf);
1637 goto err1;
1638 }
1639
1640 if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
1641 QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
1642 "because link is not up.\n");
1643 rc = -EIO;
1644 goto err1;
1645 }
1646
1647 vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
1648 if (!vn_port) {
1649 QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
1650 "for vport.\n");
1651 rc = -ENOMEM;
1652 goto err1;
1653 }
1654
1655 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1656 QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
1657 buf);
1658
1659 /* Copy some fields from base_qedf */
1660 vport_qedf = lport_priv(vn_port);
1661 memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
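	/*
	 * The memcpy above copies every field, so anything that must be
	 * private to the vport is explicitly re-initialized below.
	 */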
1662
1663 /* Set qedf data specific to this vport */
1664 vport_qedf->lport = vn_port;
1665 /* Use same hba_lock as base_qedf */
1666 vport_qedf->hba_lock = base_qedf->hba_lock;
1667 vport_qedf->pdev = base_qedf->pdev;
1668 vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
1669 init_completion(&vport_qedf->flogi_compl);
1670 INIT_LIST_HEAD(&vport_qedf->fcports);
1671
1672 rc = qedf_vport_libfc_config(vport, vn_port);
1673 if (rc) {
1674 QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
1675 "for lport stats.\n");
1676 goto err2;
1677 }
1678
1679 fc_set_wwnn(vn_port, vport->node_name);
1680 fc_set_wwpn(vn_port, vport->port_name);
1681 vport_qedf->wwnn = vn_port->wwnn;
1682 vport_qedf->wwpn = vn_port->wwpn;
1683
1684 vn_port->host->transportt = qedf_fc_vport_transport_template;
Chad Dupuis650ce642019-03-26 00:38:34 -07001685 vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001686 vn_port->host->max_lun = qedf_max_lun;
1687 vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
1688 vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
1689
1690 rc = scsi_add_host(vn_port->host, &vport->dev);
1691 if (rc) {
1692 QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
1693 goto err2;
1694 }
1695
1696 /* Set default dev_loss_tmo based on module parameter */
1697 fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1698
	/* Init libfc bits */
1700 memcpy(&vn_port->tt, &qedf_lport_template,
1701 sizeof(qedf_lport_template));
1702 fc_exch_init(vn_port);
1703 fc_elsct_init(vn_port);
1704 fc_lport_init(vn_port);
1705 fc_disc_init(vn_port);
1706 fc_disc_config(vn_port, vn_port);
1707
1708
	/* Share the N_Port's exchange manager with the new vport */
1710 shost = vport_to_shost(vport);
1711 n_port = shost_priv(shost);
1712 fc_exch_mgr_list_clone(n_port, vn_port);
1713
1714 /* Set max frame size */
1715 fc_set_mfs(vn_port, QEDF_MFS);
1716
1717 fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
1718
1719 if (disabled) {
1720 fc_vport_set_state(vport, FC_VPORT_DISABLED);
1721 } else {
1722 vn_port->boot_time = jiffies;
1723 fc_fabric_login(vn_port);
1724 fc_vport_setlink(vn_port);
1725 }
1726
1727 QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
1728 vn_port);
1729
1730 /* Set up debug context for vport */
1731 vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
1732 vport_qedf->dbg_ctx.pdev = base_qedf->pdev;

	return 0;

err2:
1735 scsi_host_put(vn_port->host);
1736err1:
1737 return rc;
1738}
1739
1740static int qedf_vport_destroy(struct fc_vport *vport)
1741{
1742 struct Scsi_Host *shost = vport_to_shost(vport);
1743 struct fc_lport *n_port = shost_priv(shost);
1744 struct fc_lport *vn_port = vport->dd_data;
Chad Dupuis4f4616c2018-04-25 06:09:02 -07001745 struct qedf_ctx *qedf = lport_priv(vn_port);
1746
1747 if (!qedf) {
1748 QEDF_ERR(NULL, "qedf is NULL.\n");
1749 goto out;
1750 }
1751
1752 /* Set unloading bit on vport qedf_ctx to prevent more I/O */
1753 set_bit(QEDF_UNLOADING, &qedf->flags);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001754
1755 mutex_lock(&n_port->lp_mutex);
1756 list_del(&vn_port->list);
1757 mutex_unlock(&n_port->lp_mutex);
1758
1759 fc_fabric_logoff(vn_port);
1760 fc_lport_destroy(vn_port);
1761
1762 /* Detach from scsi-ml */
1763 fc_remove_host(vn_port->host);
1764 scsi_remove_host(vn_port->host);
1765
1766 /*
1767 * Only try to release the exchange manager if the vn_port
1768 * configuration is complete.
1769 */
1770 if (vn_port->state == LPORT_ST_READY)
1771 fc_exch_mgr_free(vn_port);
1772
1773 /* Free memory used by statistical counters */
1774 fc_lport_free_stats(vn_port);
1775
1776 /* Release Scsi_Host */
1777 if (vn_port->host)
1778 scsi_host_put(vn_port->host);
1779
Chad Dupuis4f4616c2018-04-25 06:09:02 -07001780out:
Dupuis, Chad61d86582017-02-15 06:28:23 -08001781 return 0;
1782}
1783
1784static int qedf_vport_disable(struct fc_vport *vport, bool disable)
1785{
1786 struct fc_lport *lport = vport->dd_data;
1787
1788 if (disable) {
1789 fc_vport_set_state(vport, FC_VPORT_DISABLED);
1790 fc_fabric_logoff(lport);
1791 } else {
1792 lport->boot_time = jiffies;
1793 fc_fabric_login(lport);
1794 fc_vport_setlink(lport);
1795 }
1796 return 0;
1797}
1798
1799/*
1800 * During removal we need to wait for all the vports associated with a port
1801 * to be destroyed so we avoid a race condition where libfc is still trying
1802 * to reap vports while the driver remove function has already reaped the
1803 * driver contexts associated with the physical port.
1804 */
1805static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
1806{
1807 struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
1808
1809 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
1810 "Entered.\n");
1811 while (fc_host->npiv_vports_inuse > 0) {
1812 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
1813 "Waiting for all vports to be reaped.\n");
1814 msleep(1000);
1815 }
1816}
1817
1818/**
 * qedf_fcoe_reset - Reset the FCoE link (host LIP handler)
1820 *
1821 * @shost: shost the reset is from
1822 *
1823 * Returns: always 0
1824 */
1825static int qedf_fcoe_reset(struct Scsi_Host *shost)
1826{
1827 struct fc_lport *lport = shost_priv(shost);
1828
Chad Dupuis5cf446d2017-05-31 06:33:55 -07001829 qedf_ctx_soft_reset(lport);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001830 return 0;
1831}
1832
1833static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
1834 *shost)
1835{
1836 struct fc_host_statistics *qedf_stats;
1837 struct fc_lport *lport = shost_priv(shost);
1838 struct qedf_ctx *qedf = lport_priv(lport);
1839 struct qed_fcoe_stats *fw_fcoe_stats;
1840
1841 qedf_stats = fc_get_host_stats(shost);
1842
1843 /* We don't collect offload stats for specific NPIV ports */
1844 if (lport->vport)
1845 goto out;
1846
1847 fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
1848 if (!fw_fcoe_stats) {
1849 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
1850 "fw_fcoe_stats.\n");
1851 goto out;
1852 }
1853
Chad Dupuis642a0b32018-05-22 00:28:43 -07001854 mutex_lock(&qedf->stats_mutex);
1855
Dupuis, Chad61d86582017-02-15 06:28:23 -08001856 /* Query firmware for offload stats */
1857 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
1858
1859 /*
1860 * The expectation is that we add our offload stats to the stats
	 * being maintained by libfc each time the fc_get_host_stats callback
	 * is invoked. The additions are not carried over between calls
	 * because libfc rebuilds the counters on every invocation.
1864 */
1865 qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
1866 fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
1867 fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
1868 qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
1869 fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
1870 fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
	/*
	 * do_div() modifies its first argument in place and returns the
	 * remainder, which both skews the megabyte counts and corrupts the
	 * byte counts used for rx_words/tx_words below; div_u64() returns
	 * the quotient without touching its operands.
	 */
	qedf_stats->fcp_input_megabytes +=
	    div_u64(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
	qedf_stats->fcp_output_megabytes +=
	    div_u64(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
1875 qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
1876 qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
1877 qedf_stats->invalid_crc_count +=
1878 fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
1879 qedf_stats->dumped_frames =
1880 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
1881 qedf_stats->error_frames +=
1882 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
1883 qedf_stats->fcp_input_requests += qedf->input_requests;
1884 qedf_stats->fcp_output_requests += qedf->output_requests;
1885 qedf_stats->fcp_control_requests += qedf->control_requests;
1886 qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
1887 qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
1888
Chad Dupuis642a0b32018-05-22 00:28:43 -07001889 mutex_unlock(&qedf->stats_mutex);
Dupuis, Chad61d86582017-02-15 06:28:23 -08001890 kfree(fw_fcoe_stats);
1891out:
1892 return qedf_stats;
1893}
1894
1895static struct fc_function_template qedf_fc_transport_fn = {
1896 .show_host_node_name = 1,
1897 .show_host_port_name = 1,
1898 .show_host_supported_classes = 1,
1899 .show_host_supported_fc4s = 1,
1900 .show_host_active_fc4s = 1,
1901 .show_host_maxframe_size = 1,
1902
1903 .show_host_port_id = 1,
1904 .show_host_supported_speeds = 1,
1905 .get_host_speed = fc_get_host_speed,
1906 .show_host_speed = 1,
1907 .show_host_port_type = 1,
1908 .get_host_port_state = fc_get_host_port_state,
1909 .show_host_port_state = 1,
1910 .show_host_symbolic_name = 1,
1911
1912 /*
1913 * Tell FC transport to allocate enough space to store the backpointer
	 * for the associated qedf_rport struct.
1915 */
1916 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
1917 sizeof(struct qedf_rport)),
1918 .show_rport_maxframe_size = 1,
1919 .show_rport_supported_classes = 1,
1920 .show_host_fabric_name = 1,
1921 .show_starget_node_name = 1,
1922 .show_starget_port_name = 1,
1923 .show_starget_port_id = 1,
1924 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
1925 .show_rport_dev_loss_tmo = 1,
1926 .get_fc_host_stats = qedf_fc_get_host_stats,
1927 .issue_fc_host_lip = qedf_fcoe_reset,
1928 .vport_create = qedf_vport_create,
1929 .vport_delete = qedf_vport_destroy,
1930 .vport_disable = qedf_vport_disable,
1931 .bsg_request = fc_lport_bsg_request,
1932};
1933
1934static struct fc_function_template qedf_fc_vport_transport_fn = {
1935 .show_host_node_name = 1,
1936 .show_host_port_name = 1,
1937 .show_host_supported_classes = 1,
1938 .show_host_supported_fc4s = 1,
1939 .show_host_active_fc4s = 1,
1940 .show_host_maxframe_size = 1,
1941 .show_host_port_id = 1,
1942 .show_host_supported_speeds = 1,
1943 .get_host_speed = fc_get_host_speed,
1944 .show_host_speed = 1,
1945 .show_host_port_type = 1,
1946 .get_host_port_state = fc_get_host_port_state,
1947 .show_host_port_state = 1,
1948 .show_host_symbolic_name = 1,
1949 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
1950 sizeof(struct qedf_rport)),
1951 .show_rport_maxframe_size = 1,
1952 .show_rport_supported_classes = 1,
1953 .show_host_fabric_name = 1,
1954 .show_starget_node_name = 1,
1955 .show_starget_port_name = 1,
1956 .show_starget_port_id = 1,
1957 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
1958 .show_rport_dev_loss_tmo = 1,
1959 .get_fc_host_stats = fc_get_host_stats,
1960 .issue_fc_host_lip = qedf_fcoe_reset,
1961 .bsg_request = fc_lport_bsg_request,
1962};
1963
1964static bool qedf_fp_has_work(struct qedf_fastpath *fp)
1965{
1966 struct qedf_ctx *qedf = fp->qedf;
1967 struct global_queue *que;
1968 struct qed_sb_info *sb_info = fp->sb_info;
Tomer Tayar21dd79e2017-12-27 19:30:06 +02001969 struct status_block_e4 *sb = sb_info->sb_virt;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001970 u16 prod_idx;
1971
1972 /* Get the pointer to the global CQ this completion is on */
1973 que = qedf->global_queues[fp->sb_id];
1974
1975 /* Be sure all responses have been written to PI */
1976 rmb();
1977
1978 /* Get the current firmware producer index */
1979 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
1980
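	/*
	 * Work is pending whenever the firmware producer index differs
	 * from the producer value recorded on the last processing pass.
	 */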
1981 return (que->cq_prod_idx != prod_idx);
1982}
1983
1984/*
1985 * Interrupt handler code.
1986 */
1987
/* Process the completion queue and copy CQE contents for deferred processing
1989 *
1990 * Return true if we should wake the I/O thread, false if not.
1991 */
1992static bool qedf_process_completions(struct qedf_fastpath *fp)
1993{
1994 struct qedf_ctx *qedf = fp->qedf;
1995 struct qed_sb_info *sb_info = fp->sb_info;
Tomer Tayar21dd79e2017-12-27 19:30:06 +02001996 struct status_block_e4 *sb = sb_info->sb_virt;
Dupuis, Chad61d86582017-02-15 06:28:23 -08001997 struct global_queue *que;
1998 u16 prod_idx;
1999 struct fcoe_cqe *cqe;
2000 struct qedf_io_work *io_work;
2001 int num_handled = 0;
2002 unsigned int cpu;
2003 struct qedf_ioreq *io_req = NULL;
2004 u16 xid;
2005 u16 new_cqes;
2006 u32 comp_type;
2007
2008 /* Get the current firmware producer index */
2009 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
2010
2011 /* Get the pointer to the global CQ this completion is on */
2012 que = qedf->global_queues[fp->sb_id];
2013
2014 /* Calculate the amount of new elements since last processing */
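	/*
	 * prod_idx is a free-running 16-bit counter, so on wraparound the
	 * distance is taken through 0x10000: e.g. an old index of 0xfffe
	 * and a new index of 0x0002 means four new CQEs.
	 */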
2015 new_cqes = (prod_idx >= que->cq_prod_idx) ?
2016 (prod_idx - que->cq_prod_idx) :
2017 0x10000 - que->cq_prod_idx + prod_idx;
2018
2019 /* Save producer index */
2020 que->cq_prod_idx = prod_idx;
2021
2022 while (new_cqes) {
2023 fp->completions++;
2024 num_handled++;
2025 cqe = &que->cq[que->cq_cons_idx];
2026
2027 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2028 FCOE_CQE_CQE_TYPE_MASK;
2029
2030 /*
2031 * Process unsolicited CQEs directly in the interrupt handler
		 * since we need the fastpath ID
2033 */
2034 if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
2035 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
			    "Unsolicited CQE.\n");
2037 qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
2038 /*
			 * Don't add a work list item. Increment the consumer
			 * index and move on.
2041 */
2042 goto inc_idx;
2043 }
2044
2045 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2046 io_req = &qedf->cmd_mgr->cmds[xid];
2047
2048 /*
2049 * Figure out which percpu thread we should queue this I/O
2050 * on.
2051 */
2052 if (!io_req)
			/* If there is no io_req associated with this CQE,
			 * just queue it on CPU 0
2055 */
2056 cpu = 0;
2057 else {
2058 cpu = io_req->cpu;
2059 io_req->int_cpu = smp_processor_id();
2060 }
2061
2062 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2063 if (!io_work) {
2064 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2065 "work for I/O completion.\n");
2066 continue;
2067 }
2068 memset(io_work, 0, sizeof(struct qedf_io_work));
2069
2070 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2071
2072 /* Copy contents of CQE for deferred processing */
2073 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2074
2075 io_work->qedf = fp->qedf;
2076 io_work->fp = NULL; /* Only used for unsolicited frames */
2077
2078 queue_work_on(cpu, qedf_io_wq, &io_work->work);
2079
2080inc_idx:
2081 que->cq_cons_idx++;
2082 if (que->cq_cons_idx == fp->cq_num_entries)
2083 que->cq_cons_idx = 0;
2084 new_cqes--;
2085 }
2086
2087 return true;
2088}
2089
2090
2091/* MSI-X fastpath handler code */
2092static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
2093{
2094 struct qedf_fastpath *fp = dev_id;
2095
2096 if (!fp) {
2097 QEDF_ERR(NULL, "fp is null.\n");
2098 return IRQ_HANDLED;
2099 }
2100 if (!fp->sb_info) {
		QEDF_ERR(NULL, "fp->sb_info is null.\n");
2102 return IRQ_HANDLED;
2103 }
2104
2105 /*
2106 * Disable interrupts for this status block while we process new
2107 * completions
2108 */
2109 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
2110
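	/*
	 * Process completions, then re-check for work after updating the
	 * status block index; a CQE that arrives between the final check
	 * and the ack would otherwise sit unhandled until the next
	 * interrupt.
	 */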
2111 while (1) {
2112 qedf_process_completions(fp);
2113
2114 if (qedf_fp_has_work(fp) == 0) {
2115 /* Update the sb information */
2116 qed_sb_update_sb_idx(fp->sb_info);
2117
2118 /* Check for more work */
2119 rmb();
2120
2121 if (qedf_fp_has_work(fp) == 0) {
2122 /* Re-enable interrupts */
2123 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
2124 return IRQ_HANDLED;
2125 }
2126 }
2127 }
2128
	/* The loop above only exits via return, so this point is unreachable. */
2130 return IRQ_HANDLED;
2131}
2132
2133/* simd handler for MSI/INTa */
2134static void qedf_simd_int_handler(void *cookie)
2135{
2136 /* Cookie is qedf_ctx struct */
2137 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2138
2139 QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
2140}
2141
2142#define QEDF_SIMD_HANDLER_NUM 0
2143static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
2144{
2145 int i;
2146
2147 if (qedf->int_info.msix_cnt) {
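		/*
		 * For each vector: wait out any in-flight handler, drop the
		 * affinity hint and notifier, then release the IRQ itself.
		 */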
2148 for (i = 0; i < qedf->int_info.used_cnt; i++) {
2149 synchronize_irq(qedf->int_info.msix[i].vector);
2150 irq_set_affinity_hint(qedf->int_info.msix[i].vector,
2151 NULL);
2152 irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
2153 NULL);
2154 free_irq(qedf->int_info.msix[i].vector,
2155 &qedf->fp_array[i]);
2156 }
2157 } else
2158 qed_ops->common->simd_handler_clean(qedf->cdev,
2159 QEDF_SIMD_HANDLER_NUM);
2160
2161 qedf->int_info.used_cnt = 0;
2162 qed_ops->common->set_fp_int(qedf->cdev, 0);
2163}
2164
2165static int qedf_request_msix_irq(struct qedf_ctx *qedf)
2166{
2167 int i, rc, cpu;
2168
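	/* Spread the MSI-X vectors round-robin across the online CPUs */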
2169 cpu = cpumask_first(cpu_online_mask);
2170 for (i = 0; i < qedf->num_queues; i++) {
2171 rc = request_irq(qedf->int_info.msix[i].vector,
2172 qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
2173
2174 if (rc) {
2175 QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
2176 qedf_sync_free_irqs(qedf);
2177 return rc;
2178 }
2179
2180 qedf->int_info.used_cnt++;
2181 rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
2182 get_cpu_mask(cpu));
2183 cpu = cpumask_next(cpu, cpu_online_mask);
2184 }
2185
2186 return 0;
2187}
2188
2189static int qedf_setup_int(struct qedf_ctx *qedf)
2190{
2191 int rc = 0;
2192
2193 /*
2194 * Learn interrupt configuration
2195 */
2196 rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
Chad Dupuis914fff12017-05-31 06:33:50 -07002197 if (rc <= 0)
		return -EINVAL;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002199
2200 rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
2201 if (rc)
		return -EINVAL;
2203
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "msix_cnt = "
2205 "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
2206 num_online_cpus());
2207
2208 if (qedf->int_info.msix_cnt)
2209 return qedf_request_msix_irq(qedf);
2210
2211 qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
2212 QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
2213 qedf->int_info.used_cnt = 1;
2214
Chad Dupuis96673e12018-04-25 06:09:00 -07002215 QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
2216 return -EINVAL;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002217}
2218
2219/* Main function for libfc frame reception */
2220static void qedf_recv_frame(struct qedf_ctx *qedf,
2221 struct sk_buff *skb)
2222{
2223 u32 fr_len;
2224 struct fc_lport *lport;
2225 struct fc_frame_header *fh;
2226 struct fcoe_crc_eof crc_eof;
2227 struct fc_frame *fp;
2228 u8 *mac = NULL;
2229 u8 *dest_mac = NULL;
2230 struct fcoe_hdr *hp;
2231 struct qedf_rport *fcport;
Chad Dupuis384d5a92017-05-31 06:33:57 -07002232 struct fc_lport *vn_port;
2233 u32 f_ctl;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002234
2235 lport = qedf->lport;
2236 if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
2237 QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
2238 kfree_skb(skb);
2239 return;
2240 }
2241
2242 if (skb_is_nonlinear(skb))
2243 skb_linearize(skb);
2244 mac = eth_hdr(skb)->h_source;
2245 dest_mac = eth_hdr(skb)->h_dest;
2246
2247 /* Pull the header */
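	/*
	 * The skb currently starts at the FCoE header; the FC frame follows
	 * it and the buffer ends with the FCoE CRC/EOF trailer, so the FC
	 * frame length is what remains once both are stripped.
	 */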
2248 hp = (struct fcoe_hdr *)skb->data;
2249 fh = (struct fc_frame_header *) skb_transport_header(skb);
2250 skb_pull(skb, sizeof(struct fcoe_hdr));
2251 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
2252
2253 fp = (struct fc_frame *)skb;
2254 fc_frame_init(fp);
2255 fr_dev(fp) = lport;
2256 fr_sof(fp) = hp->fcoe_sof;
2257 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
2258 kfree_skb(skb);
2259 return;
2260 }
2261 fr_eof(fp) = crc_eof.fcoe_eof;
2262 fr_crc(fp) = crc_eof.fcoe_crc32;
2263 if (pskb_trim(skb, fr_len)) {
2264 kfree_skb(skb);
2265 return;
2266 }
2267
2268 fh = fc_frame_header_get(fp);
2269
Chad Dupuis384d5a92017-05-31 06:33:57 -07002270 /*
2271 * Invalid frame filters.
2272 */
2273
Dupuis, Chad61d86582017-02-15 06:28:23 -08002274 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
2275 fh->fh_type == FC_TYPE_FCP) {
		/* Drop FCP data; we don't handle it in the L2 path. */
2277 kfree_skb(skb);
2278 return;
2279 }
2280 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
2281 fh->fh_type == FC_TYPE_ELS) {
2282 switch (fc_frame_payload_op(fp)) {
2283 case ELS_LOGO:
2284 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
2285 /* drop non-FIP LOGO */
2286 kfree_skb(skb);
2287 return;
2288 }
2289 break;
2290 }
2291 }
2292
2293 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
2294 /* Drop incoming ABTS */
2295 kfree_skb(skb);
2296 return;
2297 }
2298
Chad Dupuis384d5a92017-05-31 06:33:57 -07002299 if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
Chad Dupuisf7e8d572017-05-31 06:33:59 -07002300 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2301 "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
Chad Dupuisf32803b2018-04-25 06:08:47 -07002302 kfree_skb(skb);
Chad Dupuis384d5a92017-05-31 06:33:57 -07002303 return;
2304 }
2305
2306 if (qedf->ctlr.state) {
2307 if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
Chad Dupuisf7e8d572017-05-31 06:33:59 -07002308 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2309 "Wrong source address: mac:%pM dest_addr:%pM.\n",
Chad Dupuis384d5a92017-05-31 06:33:57 -07002310 mac, qedf->ctlr.dest_addr);
2311 kfree_skb(skb);
2312 return;
2313 }
2314 }
2315
2316 vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
2317
2318 /*
2319 * If the destination ID from the frame header does not match what we
2320 * have on record for lport and the search for a NPIV port came up
2321 * empty then this is not addressed to our port so simply drop it.
2322 */
2323 if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
Chad Dupuisf7e8d572017-05-31 06:33:59 -07002324 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2325 "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
Chad Dupuis384d5a92017-05-31 06:33:57 -07002326 lport->port_id, ntoh24(fh->fh_d_id));
2327 kfree_skb(skb);
2328 return;
2329 }
2330
2331 f_ctl = ntoh24(fh->fh_f_ctl);
2332 if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
2333 (f_ctl & FC_FC_EX_CTX)) {
2334 /* Drop incoming ABTS response that has both SEQ/EX CTX set */
2335 kfree_skb(skb);
2336 return;
2337 }
2338
Dupuis, Chad61d86582017-02-15 06:28:23 -08002339 /*
2340 * If a connection is uploading, drop incoming FCoE frames as there
2341 * is a small window where we could try to return a frame while libfc
2342 * is trying to clean things up.
2343 */
2344
2345 /* Get fcport associated with d_id if it exists */
2346 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
2347
2348 if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
2349 &fcport->flags)) {
2350 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2351 "Connection uploading, dropping fp=%p.\n", fp);
2352 kfree_skb(skb);
2353 return;
2354 }
2355
2356 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
2357 "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
2358 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2359 fh->fh_type);
2360 if (qedf_dump_frames)
2361 print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
2362 1, skb->data, skb->len, false);
2363 fc_exch_recv(lport, fp);
2364}
2365
2366static void qedf_ll2_process_skb(struct work_struct *work)
2367{
2368 struct qedf_skb_work *skb_work =
2369 container_of(work, struct qedf_skb_work, work);
2370 struct qedf_ctx *qedf = skb_work->qedf;
2371 struct sk_buff *skb = skb_work->skb;
2372 struct ethhdr *eh;
2373
2374 if (!qedf) {
2375 QEDF_ERR(NULL, "qedf is NULL\n");
2376 goto err_out;
2377 }
2378
2379 eh = (struct ethhdr *)skb->data;
2380
2381 /* Undo VLAN encapsulation */
2382 if (eh->h_proto == htons(ETH_P_8021Q)) {
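		/*
		 * Slide the destination and source MACs forward over the
		 * 4-byte 802.1Q tag, then pull the tag so the buffer again
		 * starts with a plain Ethernet header.
		 */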
2383 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
Johannes Bergaf728682017-06-16 14:29:22 +02002384 eh = skb_pull(skb, VLAN_HLEN);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002385 skb_reset_mac_header(skb);
2386 }
2387
2388 /*
2389 * Process either a FIP frame or FCoE frame based on the
2390 * protocol value. If it's not either just drop the
2391 * frame.
2392 */
2393 if (eh->h_proto == htons(ETH_P_FIP)) {
2394 qedf_fip_recv(qedf, skb);
2395 goto out;
2396 } else if (eh->h_proto == htons(ETH_P_FCOE)) {
2397 __skb_pull(skb, ETH_HLEN);
2398 qedf_recv_frame(qedf, skb);
2399 goto out;
2400 } else
2401 goto err_out;
2402
2403err_out:
2404 kfree_skb(skb);
2405out:
2406 kfree(skb_work);
2407 return;
2408}
2409
2410static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
2411 u32 arg1, u32 arg2)
2412{
2413 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2414 struct qedf_skb_work *skb_work;
2415
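	/*
	 * This callback can run in atomic context (hence GFP_ATOMIC), so
	 * defer all frame processing to the ll2_recv_wq workqueue.
	 */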
2416 skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
2417 if (!skb_work) {
2418 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
2419 "dropping frame.\n");
2420 kfree_skb(skb);
2421 return 0;
2422 }
2423
2424 INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
2425 skb_work->skb = skb;
2426 skb_work->qedf = qedf;
2427 queue_work(qedf->ll2_recv_wq, &skb_work->work);
2428
2429 return 0;
2430}
2431
2432static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
2433 .rx_cb = qedf_ll2_rx,
2434 .tx_cb = NULL,
2435};
2436
2437/* Main thread to process I/O completions */
2438void qedf_fp_io_handler(struct work_struct *work)
2439{
2440 struct qedf_io_work *io_work =
2441 container_of(work, struct qedf_io_work, work);
2442 u32 comp_type;
2443
2444 /*
	 * The deferred part of an unsolicited CQE sends
	 * the frame to libfc.
2447 */
2448 comp_type = (io_work->cqe.cqe_data >>
2449 FCOE_CQE_CQE_TYPE_SHIFT) &
2450 FCOE_CQE_CQE_TYPE_MASK;
2451 if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
2452 io_work->fp)
2453 fc_exch_recv(io_work->qedf->lport, io_work->fp);
2454 else
2455 qedf_process_cqe(io_work->qedf, &io_work->cqe);
2456
2457 kfree(io_work);
2458}
2459
2460static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
2461 struct qed_sb_info *sb_info, u16 sb_id)
2462{
Tomer Tayar21dd79e2017-12-27 19:30:06 +02002463 struct status_block_e4 *sb_virt;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002464 dma_addr_t sb_phys;
2465 int ret;
2466
2467 sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
Tomer Tayar21dd79e2017-12-27 19:30:06 +02002468 sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002469
2470 if (!sb_virt) {
2471 QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
2472 "for id = %d.\n", sb_id);
2473 return -ENOMEM;
2474 }
2475
2476 ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
2477 sb_id, QED_SB_TYPE_STORAGE);
2478
2479 if (ret) {
2480 QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
2481 "failed for id = %d.\n", sb_id);
2482 return ret;
2483 }
2484
2485 return 0;
2486}
2487
2488static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
2489{
2490 if (sb_info->sb_virt)
2491 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
2492 (void *)sb_info->sb_virt, sb_info->sb_phys);
2493}
2494
2495static void qedf_destroy_sb(struct qedf_ctx *qedf)
2496{
2497 int id;
2498 struct qedf_fastpath *fp = NULL;
2499
2500 for (id = 0; id < qedf->num_queues; id++) {
2501 fp = &(qedf->fp_array[id]);
2502 if (fp->sb_id == QEDF_SB_ID_NULL)
2503 break;
2504 qedf_free_sb(qedf, fp->sb_info);
2505 kfree(fp->sb_info);
2506 }
2507 kfree(qedf->fp_array);
2508}
2509
2510static int qedf_prepare_sb(struct qedf_ctx *qedf)
2511{
2512 int id;
2513 struct qedf_fastpath *fp;
2514 int ret;
2515
2516 qedf->fp_array =
2517 kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
2518 GFP_KERNEL);
2519
2520 if (!qedf->fp_array) {
2521 QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
2522 "failed.\n");
2523 return -ENOMEM;
2524 }
2525
2526 for (id = 0; id < qedf->num_queues; id++) {
2527 fp = &(qedf->fp_array[id]);
2528 fp->sb_id = QEDF_SB_ID_NULL;
2529 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
2530 if (!fp->sb_info) {
2531 QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
2532 "allocation failed.\n");
			ret = -ENOMEM;
			goto err;
2534 }
2535 ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
2536 if (ret) {
2537 QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
2538 "initialization failed.\n");
2539 goto err;
2540 }
2541 fp->sb_id = id;
2542 fp->qedf = qedf;
2543 fp->cq_num_entries =
2544 qedf->global_queues[id]->cq_mem_size /
2545 sizeof(struct fcoe_cqe);
2546 }

	return 0;
err:
	return ret;
2549}
2550
2551void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
2552{
2553 u16 xid;
2554 struct qedf_ioreq *io_req;
2555 struct qedf_rport *fcport;
2556 u32 comp_type;
2557
2558 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2559 FCOE_CQE_CQE_TYPE_MASK;
2560
2561 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2562 io_req = &qedf->cmd_mgr->cmds[xid];
2563
2564 /* Completion not for a valid I/O anymore so just return */
2565 if (!io_req)
2566 return;
2567
2568 fcport = io_req->fcport;
2569
2570 if (fcport == NULL) {
2571 QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
2572 return;
2573 }
2574
2575 /*
2576 * Check that fcport is offloaded. If it isn't then the spinlock
2577 * isn't valid and shouldn't be taken. We should just return.
2578 */
2579 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2580 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
2581 return;
2582 }
2583
2584
2585 switch (comp_type) {
2586 case FCOE_GOOD_COMPLETION_CQE_TYPE:
2587 atomic_inc(&fcport->free_sqes);
2588 switch (io_req->cmd_type) {
2589 case QEDF_SCSI_CMD:
2590 qedf_scsi_completion(qedf, cqe, io_req);
2591 break;
2592 case QEDF_ELS:
2593 qedf_process_els_compl(qedf, cqe, io_req);
2594 break;
2595 case QEDF_TASK_MGMT_CMD:
2596 qedf_process_tmf_compl(qedf, cqe, io_req);
2597 break;
2598 case QEDF_SEQ_CLEANUP:
2599 qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
2600 break;
2601 }
2602 break;
2603 case FCOE_ERROR_DETECTION_CQE_TYPE:
2604 atomic_inc(&fcport->free_sqes);
2605 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2606 "Error detect CQE.\n");
2607 qedf_process_error_detect(qedf, cqe, io_req);
2608 break;
2609 case FCOE_EXCH_CLEANUP_CQE_TYPE:
2610 atomic_inc(&fcport->free_sqes);
2611 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2612 "Cleanup CQE.\n");
2613 qedf_process_cleanup_compl(qedf, cqe, io_req);
2614 break;
2615 case FCOE_ABTS_CQE_TYPE:
2616 atomic_inc(&fcport->free_sqes);
2617 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2618 "Abort CQE.\n");
2619 qedf_process_abts_compl(qedf, cqe, io_req);
2620 break;
2621 case FCOE_DUMMY_CQE_TYPE:
2622 atomic_inc(&fcport->free_sqes);
2623 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2624 "Dummy CQE.\n");
2625 break;
2626 case FCOE_LOCAL_COMP_CQE_TYPE:
2627 atomic_inc(&fcport->free_sqes);
2628 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2629 "Local completion CQE.\n");
2630 break;
2631 case FCOE_WARNING_CQE_TYPE:
2632 atomic_inc(&fcport->free_sqes);
2633 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2634 "Warning CQE.\n");
2635 qedf_process_warning_compl(qedf, cqe, io_req);
2636 break;
2637 case MAX_FCOE_CQE_TYPE:
2638 atomic_inc(&fcport->free_sqes);
2639 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2640 "Max FCoE CQE.\n");
2641 break;
2642 default:
2643 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2644 "Default CQE.\n");
2645 break;
2646 }
2647}
2648
2649static void qedf_free_bdq(struct qedf_ctx *qedf)
2650{
2651 int i;
2652
2653 if (qedf->bdq_pbl_list)
2654 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2655 qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
2656
2657 if (qedf->bdq_pbl)
2658 dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
2659 qedf->bdq_pbl, qedf->bdq_pbl_dma);
2660
2661 for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2662 if (qedf->bdq[i].buf_addr) {
2663 dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
2664 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
2665 }
2666 }
2667}
2668
2669static void qedf_free_global_queues(struct qedf_ctx *qedf)
2670{
2671 int i;
2672 struct global_queue **gl = qedf->global_queues;
2673
2674 for (i = 0; i < qedf->num_queues; i++) {
2675 if (!gl[i])
2676 continue;
2677
2678 if (gl[i]->cq)
2679 dma_free_coherent(&qedf->pdev->dev,
2680 gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
2681 if (gl[i]->cq_pbl)
2682 dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
2683 gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
2684
2685 kfree(gl[i]);
2686 }
2687
2688 qedf_free_bdq(qedf);
2689}
2690
2691static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2692{
2693 int i;
2694 struct scsi_bd *pbl;
2695 u64 *list;
2696 dma_addr_t page;
2697
2698 /* Alloc dma memory for BDQ buffers */
2699 for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2700 qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
2701 QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
2702 if (!qedf->bdq[i].buf_addr) {
2703 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
2704 "buffer %d.\n", i);
2705 return -ENOMEM;
2706 }
2707 }
2708
2709 /* Alloc dma memory for BDQ page buffer list */
2710 qedf->bdq_pbl_mem_size =
2711 QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
2712 qedf->bdq_pbl_mem_size =
2713 ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
2714
2715 qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
2716 qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
2717 if (!qedf->bdq_pbl) {
2718 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
2719 return -ENOMEM;
2720 }
2721
2722 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
Joe Perchesfd2b18b2017-03-06 10:32:27 -08002723 "BDQ PBL addr=0x%p dma=%pad\n",
2724 qedf->bdq_pbl, &qedf->bdq_pbl_dma);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002725
2726 /*
	 * Populate the BDQ PBL with the physical address of each BDQ buffer
	 * and an opaque index back into the bdq array
2729 */
2730 pbl = (struct scsi_bd *)qedf->bdq_pbl;
2731 for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2732 pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
2733 pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
Tomer Tayarda090912017-12-27 19:30:07 +02002734 pbl->opaque.fcoe_opaque.hi = 0;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002735 /* Opaque lo data is an index into the BDQ array */
Tomer Tayarda090912017-12-27 19:30:07 +02002736 pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002737 pbl++;
2738 }
2739
2740 /* Allocate list of PBL pages */
Luis Chamberlain750afb02019-01-04 09:23:09 +01002741 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
2742 QEDF_PAGE_SIZE,
2743 &qedf->bdq_pbl_list_dma,
2744 GFP_KERNEL);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002745 if (!qedf->bdq_pbl_list) {
Christophe JAILLET32eebb32017-06-11 08:16:04 +02002746 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
Dupuis, Chad61d86582017-02-15 06:28:23 -08002747 return -ENOMEM;
2748 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08002749
2750 /*
2751 * Now populate PBL list with pages that contain pointers to the
2752 * individual buffers.
2753 */
2754 qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
2755 QEDF_PAGE_SIZE;
2756 list = (u64 *)qedf->bdq_pbl_list;
	page = qedf->bdq_pbl_dma;
	for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
		/* Each entry points at successive pages of the PBL itself */
		*list = page;
		list++;
		page += QEDF_PAGE_SIZE;
2762 }
2763
2764 return 0;
2765}
2766
2767static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
2768{
2769 u32 *list;
2770 int i;
2771 int status = 0, rc;
2772 u32 *pbl;
2773 dma_addr_t page;
2774 int num_pages;
2775
2776 /* Allocate and map CQs, RQs */
2777 /*
2778 * Number of global queues (CQ / RQ). This should
2779 * be <= number of available MSIX vectors for the PF
2780 */
2781 if (!qedf->num_queues) {
2782 QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
2783 return 1;
2784 }
2785
2786 /*
2787 * Make sure we allocated the PBL that will contain the physical
2788 * addresses of our queues
2789 */
2790 if (!qedf->p_cpuq) {
2791 status = 1;
2792 goto mem_alloc_failure;
2793 }
2794
2795 qedf->global_queues = kzalloc((sizeof(struct global_queue *)
2796 * qedf->num_queues), GFP_KERNEL);
2797 if (!qedf->global_queues) {
2798 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
2799 "queues array ptr memory\n");
2800 return -ENOMEM;
2801 }
2802 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2803 "qedf->global_queues=%p.\n", qedf->global_queues);
2804
2805 /* Allocate DMA coherent buffers for BDQ */
2806 rc = qedf_alloc_bdq(qedf);
2807 if (rc)
2808 goto mem_alloc_failure;
2809
2810 /* Allocate a CQ and an associated PBL for each MSI-X vector */
2811 for (i = 0; i < qedf->num_queues; i++) {
2812 qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
2813 GFP_KERNEL);
2814 if (!qedf->global_queues[i]) {
Christophe JAILLET3a240b22017-06-11 08:16:02 +02002815 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
Dupuis, Chad61d86582017-02-15 06:28:23 -08002816 "global queue %d.\n", i);
Christophe JAILLET3a240b22017-06-11 08:16:02 +02002817 status = -ENOMEM;
Dupuis, Chad61d86582017-02-15 06:28:23 -08002818 goto mem_alloc_failure;
2819 }
2820
2821 qedf->global_queues[i]->cq_mem_size =
2822 FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
2823 qedf->global_queues[i]->cq_mem_size =
2824 ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
2825
2826 qedf->global_queues[i]->cq_pbl_size =
2827 (qedf->global_queues[i]->cq_mem_size /
2828 PAGE_SIZE) * sizeof(void *);
2829 qedf->global_queues[i]->cq_pbl_size =
2830 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
2831
2832 qedf->global_queues[i]->cq =
Luis Chamberlain750afb02019-01-04 09:23:09 +01002833 dma_alloc_coherent(&qedf->pdev->dev,
2834 qedf->global_queues[i]->cq_mem_size,
2835 &qedf->global_queues[i]->cq_dma,
2836 GFP_KERNEL);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002837
2838 if (!qedf->global_queues[i]->cq) {
Christophe JAILLET32eebb32017-06-11 08:16:04 +02002839 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
Dupuis, Chad61d86582017-02-15 06:28:23 -08002840 status = -ENOMEM;
2841 goto mem_alloc_failure;
2842 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08002843
2844 qedf->global_queues[i]->cq_pbl =
Luis Chamberlain750afb02019-01-04 09:23:09 +01002845 dma_alloc_coherent(&qedf->pdev->dev,
2846 qedf->global_queues[i]->cq_pbl_size,
2847 &qedf->global_queues[i]->cq_pbl_dma,
2848 GFP_KERNEL);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002849
2850 if (!qedf->global_queues[i]->cq_pbl) {
Christophe JAILLET32eebb32017-06-11 08:16:04 +02002851 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
Dupuis, Chad61d86582017-02-15 06:28:23 -08002852 status = -ENOMEM;
2853 goto mem_alloc_failure;
2854 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08002855
2856 /* Create PBL */
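		/*
		 * Each PBL entry is the 64-bit DMA address of one CQ page,
		 * written as two consecutive 32-bit words, low half first.
		 */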
2857 num_pages = qedf->global_queues[i]->cq_mem_size /
2858 QEDF_PAGE_SIZE;
2859 page = qedf->global_queues[i]->cq_dma;
2860 pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
2861
2862 while (num_pages--) {
2863 *pbl = U64_LO(page);
2864 pbl++;
2865 *pbl = U64_HI(page);
2866 pbl++;
2867 page += QEDF_PAGE_SIZE;
2868 }
2869 /* Set the initial consumer index for cq */
2870 qedf->global_queues[i]->cq_cons_idx = 0;
2871 }
2872
2873 list = (u32 *)qedf->p_cpuq;
2874
2875 /*
2876 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
2877 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
2878 * to the physical address which contains an array of pointers to
2879 * the physical addresses of the specific queue pages.
2880 */
2881 for (i = 0; i < qedf->num_queues; i++) {
2882 *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
2883 list++;
2884 *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
2885 list++;
2886 *list = U64_LO(0);
2887 list++;
2888 *list = U64_HI(0);
2889 list++;
2890 }
2891
2892 return 0;
2893
2894mem_alloc_failure:
2895 qedf_free_global_queues(qedf);
2896 return status;
2897}
2898
2899static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
2900{
2901 u8 sq_num_pbl_pages;
2902 u32 sq_mem_size;
2903 u32 cq_mem_size;
2904 u32 cq_num_entries;
2905 int rval;
2906
2907 /*
2908 * The number of completion queues/fastpath interrupts/status blocks
	 * we allocate is the minimum of:
2910 *
2911 * Number of CPUs
Thomas Bogendoerfer722477c2017-07-25 11:19:21 +02002912 * Number allocated by qed for our PCI function
Dupuis, Chad61d86582017-02-15 06:28:23 -08002913 */
Thomas Bogendoerfer722477c2017-07-25 11:19:21 +02002914 qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002915
2916 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
2917 qedf->num_queues);
2918
Christoph Hellwig332d84f2018-10-10 20:04:15 +02002919 qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
Dupuis, Chad61d86582017-02-15 06:28:23 -08002920 qedf->num_queues * sizeof(struct qedf_glbl_q_params),
Christoph Hellwig332d84f2018-10-10 20:04:15 +02002921 &qedf->hw_p_cpuq, GFP_KERNEL);
Dupuis, Chad61d86582017-02-15 06:28:23 -08002922
2923 if (!qedf->p_cpuq) {
Christoph Hellwig332d84f2018-10-10 20:04:15 +02002924 QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
Dupuis, Chad61d86582017-02-15 06:28:23 -08002925 return 1;
2926 }
2927
2928 rval = qedf_alloc_global_queues(qedf);
2929 if (rval) {
2930 QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
2931 "failed.\n");
2932 return 1;
2933 }
2934
2935 /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
2936 sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
2937 sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
2938 sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
2939
2940 /* Calculate CQ num entries */
2941 cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
2942 cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
2943 cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
2944
Christophe JAILLET32eebb32017-06-11 08:16:04 +02002945 memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
Dupuis, Chad61d86582017-02-15 06:28:23 -08002946
2947 /* Setup the value for fcoe PF */
2948 qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
2949 qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
2950 qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
2951 (u64)qedf->hw_p_cpuq;
2952 qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
2953
2954 qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
2955
2956 qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
2957 qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
2958
2959 /* log_page_size: 12 for 4KB pages */
2960 qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
2961
2962 qedf->pf_params.fcoe_pf_params.mtu = 9000;
2963 qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
2964 qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
2965
2966 /* BDQ address and size */
2967 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
2968 qedf->bdq_pbl_list_dma;
2969 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
2970 qedf->bdq_pbl_list_num_entries;
2971 qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
2972
2973 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2974 "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
2975 qedf->bdq_pbl_list,
2976 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
2977 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
2978
2979 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2980 "cq_num_entries=%d.\n",
2981 qedf->pf_params.fcoe_pf_params.cq_num_entries);
2982
2983 return 0;
2984}
2985
2986/* Free DMA coherent memory for array of queue pointers we pass to qed */
2987static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
2988{
2989 size_t size = 0;
2990
2991 if (qedf->p_cpuq) {
2992 size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
Christoph Hellwig332d84f2018-10-10 20:04:15 +02002993 dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
Dupuis, Chad61d86582017-02-15 06:28:23 -08002994 qedf->hw_p_cpuq);
2995 }
2996
2997 qedf_free_global_queues(qedf);
2998
Thomas Meyerf3e46ac2018-12-02 21:52:11 +01002999 kfree(qedf->global_queues);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003000}
3001
3002/*
3003 * PCI driver functions
3004 */
3005
3006static const struct pci_device_id qedf_pci_tbl[] = {
3007 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
3008 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
3009 {0}
3010};
3011MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
3012
3013static struct pci_driver qedf_pci_driver = {
3014 .name = QEDF_MODULE_NAME,
3015 .id_table = qedf_pci_tbl,
3016 .probe = qedf_probe,
3017 .remove = qedf_remove,
3018};
3019
3020static int __qedf_probe(struct pci_dev *pdev, int mode)
3021{
3022 int rc = -EINVAL;
3023 struct fc_lport *lport;
3024 struct qedf_ctx *qedf;
3025 struct Scsi_Host *host;
3026 bool is_vf = false;
3027 struct qed_ll2_params params;
3028 char host_buf[20];
3029 struct qed_link_params link_params;
3030 int status;
3031 void *task_start, *task_end;
3032 struct qed_slowpath_params slowpath_params;
3033 struct qed_probe_params qed_params;
3034 u16 tmp;
3035
3036 /*
3037 * When doing error recovery we didn't reap the lport so don't try
3038 * to reallocate it.
3039 */
3040 if (mode != QEDF_MODE_RECOVERY) {
3041 lport = libfc_host_alloc(&qedf_host_template,
3042 sizeof(struct qedf_ctx));
3043
3044 if (!lport) {
3045 QEDF_ERR(NULL, "Could not allocate lport.\n");
3046 rc = -ENOMEM;
3047 goto err0;
3048 }
3049
Chad Dupuis0cbd0072019-03-26 00:38:41 -07003050 fc_disc_init(lport);
3051
Dupuis, Chad61d86582017-02-15 06:28:23 -08003052 /* Initialize qedf_ctx */
3053 qedf = lport_priv(lport);
3054 qedf->lport = lport;
3055 qedf->ctlr.lp = lport;
3056 qedf->pdev = pdev;
3057 qedf->dbg_ctx.pdev = pdev;
3058 qedf->dbg_ctx.host_no = lport->host->host_no;
3059 spin_lock_init(&qedf->hba_lock);
3060 INIT_LIST_HEAD(&qedf->fcports);
3061 qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
3062 atomic_set(&qedf->num_offloads, 0);
3063 qedf->stop_io_on_error = false;
3064 pci_set_drvdata(pdev, qedf);
Chad Dupuis8eaf7df2017-03-23 06:58:47 -07003065 init_completion(&qedf->fipvlan_compl);
Chad Dupuis642a0b32018-05-22 00:28:43 -07003066 mutex_init(&qedf->stats_mutex);
Saurav Kashyap69ef2c62019-03-26 00:38:38 -07003067 mutex_init(&qedf->flush_mutex);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003068
3069 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
3070 "QLogic FastLinQ FCoE Module qedf %s, "
3071 "FW %d.%d.%d.%d\n", QEDF_VERSION,
3072 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
3073 FW_ENGINEERING_VERSION);
3074 } else {
3075 /* Init pointers during recovery */
3076 qedf = pci_get_drvdata(pdev);
3077 lport = qedf->lport;
3078 }
3079
3080 host = lport->host;
3081
3082 /* Allocate mempool for qedf_io_work structs */
3083 qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
3084 qedf_io_work_cache);
3085 if (qedf->io_mempool == NULL) {
3086 QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
3087 goto err1;
3088 }
3089 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
3090 qedf->io_mempool);
3091
3092 sprintf(host_buf, "qedf_%u_link",
3093 qedf->lport->host->host_no);
Chad Dupuis428ca642017-08-15 10:08:19 -07003094 qedf->link_update_wq = create_workqueue(host_buf);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003095 INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
3096 INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
Chad Dupuis4b9b7fa2018-04-25 06:08:58 -07003097 INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003098 qedf->fipvlan_retries = qedf_fipvlan_retries;
Chad Dupuis84b2ba62018-04-25 06:08:52 -07003099 /* Set a default prio in case DCBX doesn't converge */
Chad Dupuis65b7bec2018-04-25 06:08:59 -07003100 if (qedf_default_prio > -1) {
3101 /*
3102 * This is the case where we pass a modparam in so we want to
3103 * honor it even if dcbx doesn't converge.
3104 */
3105 qedf->prio = qedf_default_prio;
3106 } else
3107 qedf->prio = QEDF_DEFAULT_PRIO;
Dupuis, Chad61d86582017-02-15 06:28:23 -08003108
3109 /*
3110 * Common probe. Takes care of basic hardware init and pci_*
3111 * functions.
3112 */
3113 memset(&qed_params, 0, sizeof(qed_params));
3114 qed_params.protocol = QED_PROTOCOL_FCOE;
3115 qed_params.dp_module = qedf_dp_module;
3116 qed_params.dp_level = qedf_dp_level;
3117 qed_params.is_vf = is_vf;
3118 qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
3119 if (!qedf->cdev) {
3120 rc = -ENODEV;
3121 goto err1;
3122 }
3123
Thomas Bogendoerfer722477c2017-07-25 11:19:21 +02003124 /* Learn information crucial for qedf to progress */
3125 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3126 if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
3128 goto err1;
3129 }
3130
Dupuis, Chad61d86582017-02-15 06:28:23 -08003131 /* queue allocation code should come here
3132 * order should be
3133 * slowpath_start
3134 * status block allocation
3135 * interrupt registration (to get min number of queues)
3136 * set_fcoe_pf_param
3137 * qed_sp_fcoe_func_start
3138 */
3139 rc = qedf_set_fcoe_pf_param(qedf);
3140 if (rc) {
3141 QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
3142 goto err2;
3143 }
3144 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3145
Dupuis, Chad61d86582017-02-15 06:28:23 -08003146 /* Record BDQ producer doorbell addresses */
3147 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
3148 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
3149 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3150 "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
3151 qedf->bdq_secondary_prod);
3152
3153 qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
3154
3155 rc = qedf_prepare_sb(qedf);
3156 if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
3159 goto err2;
3160 }
3161
3162 /* Start the Slowpath-process */
3163 slowpath_params.int_mode = QED_INT_MODE_MSIX;
3164 slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
3165 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
3166 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
3167 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
Kees Cookcd228742017-05-05 15:42:55 -07003168 strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003169 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
3170 if (rc) {
3171 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
3172 goto err2;
3173 }
3174
3175 /*
3176 * update_pf_params needs to be called before and after slowpath
3177 * start
3178 */
3179 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3180
3181 /* Setup interrupts */
3182 rc = qedf_setup_int(qedf);
3183 if (rc)
3184 goto err3;
3185
3186 rc = qed_ops->start(qedf->cdev, &qedf->tasks);
3187 if (rc) {
3188 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
3189 goto err4;
3190 }
3191 task_start = qedf_get_task_mem(&qedf->tasks, 0);
3192 task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
3193 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
3194 "end=%p block_size=%u.\n", task_start, task_end,
3195 qedf->tasks.size);
3196
3197 /*
3198 * We need to write the number of BDs in the BDQ we've preallocated so
3199 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
3200 * packet arrives.
3201 */
3202 qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
3203 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3204 "Writing %d to primary and secondary BDQ doorbell registers.\n",
3205 qedf->bdq_prod_idx);
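	/* Read back each doorbell to flush the posted write (assumed intent
	 * of the otherwise-unused readw() results below).
	 */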
3206 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
3207 tmp = readw(qedf->bdq_primary_prod);
3208 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
3209 tmp = readw(qedf->bdq_secondary_prod);
3210
3211 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3212
	/* Now that the dev_info struct has been filled in, set the MAC
3214 * address
3215 */
3216 ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
3217 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
3218 qedf->mac);
3219
Chad Dupuis01fd76a2017-08-15 10:08:16 -07003220 /*
3221 * Set the WWNN and WWPN in the following way:
3222 *
3223 * If the info we get from qed is non-zero then use that to set the
3224 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
3225 * on the MAC address.
3226 */
3227 if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
3228 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3229 "Setting WWPN and WWNN from qed dev_info.\n");
3230 qedf->wwnn = qedf->dev_info.wwnn;
3231 qedf->wwpn = qedf->dev_info.wwpn;
3232 } else {
3233 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3234 "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
3235 qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
3236 qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
3237 }
Dupuis, Chad61d86582017-02-15 06:28:23 -08003238 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
3239 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
3240
3241 sprintf(host_buf, "host_%d", host->host_no);
Mintz, Yuval712c3cb2017-05-23 09:41:28 +03003242 qed_ops->common->set_name(qedf->cdev, host_buf);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003243
Dupuis, Chad61d86582017-02-15 06:28:23 -08003244 /* Allocate cmd mgr */
3245 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3246 if (!qedf->cmd_mgr) {
3247 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
Wei Yongjune89cabf2018-01-17 12:42:41 +00003248 rc = -ENOMEM;
Dupuis, Chad61d86582017-02-15 06:28:23 -08003249 goto err5;
3250 }
3251
3252 if (mode != QEDF_MODE_RECOVERY) {
3253 host->transportt = qedf_fc_transport_template;
Dupuis, Chad61d86582017-02-15 06:28:23 -08003254 host->max_lun = qedf_max_lun;
3255 host->max_cmd_len = QEDF_MAX_CDB_LEN;
Chad Dupuis650ce642019-03-26 00:38:34 -07003256 host->can_queue = FCOE_PARAMS_NUM_TASKS;
Dupuis, Chad61d86582017-02-15 06:28:23 -08003257 rc = scsi_add_host(host, &pdev->dev);
3258 if (rc)
3259 goto err6;
3260 }
3261
3262 memset(&params, 0, sizeof(params));
3263 params.mtu = 9000;
3264 ether_addr_copy(params.ll2_mac_address, qedf->mac);
3265
3266 /* Start LL2 processing thread */
3267 snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
3268 qedf->ll2_recv_wq =
Chad Dupuis428ca642017-08-15 10:08:19 -07003269 create_workqueue(host_buf);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003270 if (!qedf->ll2_recv_wq) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate LL2 workqueue.\n");
Wei Yongjune89cabf2018-01-17 12:42:41 +00003272 rc = -ENOMEM;
Dupuis, Chad61d86582017-02-15 06:28:23 -08003273 goto err7;
3274 }
3275
3276#ifdef CONFIG_DEBUG_FS
Arnd Bergmannd9ea4632018-02-02 14:12:18 +01003277 qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
3278 qedf_dbg_fops);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003279#endif
3280
3281 /* Start LL2 */
3282 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3283 rc = qed_ops->ll2->start(qedf->cdev, &params);
3284 if (rc) {
3285 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3286 goto err7;
3287 }
3288 set_bit(QEDF_LL2_STARTED, &qedf->flags);
3289
Chad Dupuiscf291162017-08-15 10:08:18 -07003290 /* Set initial FIP/FCoE VLAN to NULL */
Dupuis, Chad61d86582017-02-15 06:28:23 -08003291 qedf->vlan_id = 0;
3292
3293 /*
3294 * No need to setup fcoe_ctlr or fc_lport objects during recovery since
3295 * they were not reaped during the unload process.
3296 */
3297 if (mode != QEDF_MODE_RECOVERY) {
		/* Set up the embedded fcoe controller */
3299 qedf_fcoe_ctlr_setup(qedf);
3300
3301 /* Setup lport */
3302 rc = qedf_lport_setup(qedf);
3303 if (rc) {
3304 QEDF_ERR(&(qedf->dbg_ctx),
3305 "qedf_lport_setup failed.\n");
3306 goto err7;
3307 }
3308 }
3309
3310 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
3311 qedf->timer_work_queue =
Chad Dupuis428ca642017-08-15 10:08:19 -07003312 create_workqueue(host_buf);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003313 if (!qedf->timer_work_queue) {
3314 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
3315 "workqueue.\n");
Wei Yongjune89cabf2018-01-17 12:42:41 +00003316 rc = -ENOMEM;
Dupuis, Chad61d86582017-02-15 06:28:23 -08003317 goto err7;
3318 }
3319
3320 /* DPC workqueue is not reaped during recovery unload */
3321 if (mode != QEDF_MODE_RECOVERY) {
3322 sprintf(host_buf, "qedf_%u_dpc",
3323 qedf->lport->host->host_no);
Chad Dupuis428ca642017-08-15 10:08:19 -07003324 qedf->dpc_wq = create_workqueue(host_buf);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003325 }
3326
3327 /*
3328 * GRC dump and sysfs parameters are not reaped during the recovery
3329 * unload process.
3330 */
3331 if (mode != QEDF_MODE_RECOVERY) {
Chad Dupuis4b9b7fa2018-04-25 06:08:58 -07003332 qedf->grcdump_size =
3333 qed_ops->common->dbg_all_data_size(qedf->cdev);
Dupuis, Chad61d86582017-02-15 06:28:23 -08003334 if (qedf->grcdump_size) {
3335 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3336 qedf->grcdump_size);
3337 if (rc) {
3338 QEDF_ERR(&(qedf->dbg_ctx),
3339 "GRC Dump buffer alloc failed.\n");
3340 qedf->grcdump = NULL;
3341 }
3342
3343 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3344 "grcdump: addr=%p, size=%u.\n",
3345 qedf->grcdump, qedf->grcdump_size);
3346 }
3347 qedf_create_sysfs_ctx_attr(qedf);
3348
3349 /* Initialize I/O tracing for this adapter */
3350 spin_lock_init(&qedf->io_trace_lock);
3351 qedf->io_trace_idx = 0;
3352 }
3353
3354 init_completion(&qedf->flogi_compl);
3355
Saurav Kashyap6ac17472018-07-05 07:01:33 -07003356 status = qed_ops->common->update_drv_state(qedf->cdev, true);
3357 if (status)
3358 QEDF_ERR(&(qedf->dbg_ctx),
3359 "Failed to send drv state to MFW.\n");
3360
Dupuis, Chad61d86582017-02-15 06:28:23 -08003361 memset(&link_params, 0, sizeof(struct qed_link_params));
3362 link_params.link_up = true;
3363 status = qed_ops->common->set_link(qedf->cdev, &link_params);
3364 if (status)
3365 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3366
3367 /* Start/restart discovery */
3368 if (mode == QEDF_MODE_RECOVERY)
3369 fcoe_ctlr_link_up(&qedf->ctlr);
3370 else
3371 fc_fabric_login(lport);
3372
3373 /* All good */
3374 return 0;
3375
3376err7:
3377 if (qedf->ll2_recv_wq)
3378 destroy_workqueue(qedf->ll2_recv_wq);
3379 fc_remove_host(qedf->lport->host);
3380 scsi_remove_host(qedf->lport->host);
3381#ifdef CONFIG_DEBUG_FS
3382 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3383#endif
3384err6:
3385 qedf_cmd_mgr_free(qedf->cmd_mgr);
3386err5:
3387 qed_ops->stop(qedf->cdev);
3388err4:
3389 qedf_free_fcoe_pf_param(qedf);
3390 qedf_sync_free_irqs(qedf);
3391err3:
3392 qed_ops->common->slowpath_stop(qedf->cdev);
3393err2:
3394 qed_ops->common->remove(qedf->cdev);
3395err1:
3396 scsi_host_put(lport->host);
3397err0:
3398 return rc;
3399}
3400
3401static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3402{
3403 return __qedf_probe(pdev, QEDF_MODE_NORMAL);
3404}
3405
3406static void __qedf_remove(struct pci_dev *pdev, int mode)
3407{
3408 struct qedf_ctx *qedf;
Saurav Kashyap6ac17472018-07-05 07:01:33 -07003409 int rc;
Dupuis, Chad61d86582017-02-15 06:28:23 -08003410
3411 if (!pdev) {
3412 QEDF_ERR(NULL, "pdev is NULL.\n");
3413 return;
3414 }
3415
3416 qedf = pci_get_drvdata(pdev);
3417
3418 /*
3419 * Prevent race where we're in board disable work and then try to
3420 * rmmod the module.
3421 */
3422 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3423 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3424 return;
3425 }
3426
3427 if (mode != QEDF_MODE_RECOVERY)
3428 set_bit(QEDF_UNLOADING, &qedf->flags);
3429
3430 /* Logoff the fabric to upload all connections */
3431 if (mode == QEDF_MODE_RECOVERY)
3432 fcoe_ctlr_link_down(&qedf->ctlr);
3433 else
3434 fc_fabric_logoff(qedf->lport);
3435 qedf_wait_for_upload(qedf);
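	/*
	 * At this point all offloaded sessions have been uploaded (torn
	 * down in firmware), so the remaining teardown below is safe.
	 */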
3436
3437#ifdef CONFIG_DEBUG_FS
3438 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3439#endif
3440
3441 /* Stop any link update handling */
3442 cancel_delayed_work_sync(&qedf->link_update);
3443 destroy_workqueue(qedf->link_update_wq);
3444 qedf->link_update_wq = NULL;
3445
3446 if (qedf->timer_work_queue)
3447 destroy_workqueue(qedf->timer_work_queue);
3448
3449 /* Stop Light L2 */
3450 clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3451 qed_ops->ll2->stop(qedf->cdev);
3452 if (qedf->ll2_recv_wq)
3453 destroy_workqueue(qedf->ll2_recv_wq);
3454
3455 /* Stop fastpath */
3456 qedf_sync_free_irqs(qedf);
3457 qedf_destroy_sb(qedf);
3458
3459 /*
3460 * During recovery don't destroy OS constructs that represent the
3461 * physical port.
3462 */
3463 if (mode != QEDF_MODE_RECOVERY) {
3464 qedf_free_grc_dump_buf(&qedf->grcdump);
3465 qedf_remove_sysfs_ctx_attr(qedf);
3466
3467 /* Remove all SCSI/libfc/libfcoe structures */
3468 fcoe_ctlr_destroy(&qedf->ctlr);
3469 fc_lport_destroy(qedf->lport);
3470 fc_remove_host(qedf->lport->host);
3471 scsi_remove_host(qedf->lport->host);
3472 }
3473
3474 qedf_cmd_mgr_free(qedf->cmd_mgr);
3475
3476 if (mode != QEDF_MODE_RECOVERY) {
3477 fc_exch_mgr_free(qedf->lport);
3478 fc_lport_free_stats(qedf->lport);
3479
3480 /* Wait for all vports to be reaped */
3481 qedf_wait_for_vport_destroy(qedf);
3482 }
3483
3484 /*
3485 * Now that all connections have been uploaded we can stop the
3486 * rest of the qed operations
3487 */
3488 qed_ops->stop(qedf->cdev);
3489
3490 if (mode != QEDF_MODE_RECOVERY) {
3491 if (qedf->dpc_wq) {
3492 /* Stop general DPC handling */
3493 destroy_workqueue(qedf->dpc_wq);
3494 qedf->dpc_wq = NULL;
3495 }
3496 }
3497
3498 /* Final shutdown for the board */
3499 qedf_free_fcoe_pf_param(qedf);
3500 if (mode != QEDF_MODE_RECOVERY) {
3501 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3502 pci_set_drvdata(pdev, NULL);
3503 }
Saurav Kashyap6ac17472018-07-05 07:01:33 -07003504
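	/* Tell the management firmware the driver no longer owns this function. */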
3505 rc = qed_ops->common->update_drv_state(qedf->cdev, false);
3506 if (rc)
3507 QEDF_ERR(&(qedf->dbg_ctx),
3508 "Failed to send drv state to MFW.\n");
3509
Dupuis, Chad61d86582017-02-15 06:28:23 -08003510 qed_ops->common->slowpath_stop(qedf->cdev);
3511 qed_ops->common->remove(qedf->cdev);
3512
3513 mempool_destroy(qedf->io_mempool);
3514
3515 /* Only reap the Scsi_host on a real removal */
3516 if (mode != QEDF_MODE_RECOVERY)
3517 scsi_host_put(qedf->lport->host);
3518}
3519
3520static void qedf_remove(struct pci_dev *pdev)
3521{
3522 /* Check to make sure this function wasn't already disabled */
3523 if (!atomic_read(&pdev->enable_cnt))
3524 return;
3525
3526 __qedf_remove(pdev, QEDF_MODE_NORMAL);
3527}
3528
Chad Dupuis4b9b7fa2018-04-25 06:08:58 -07003529void qedf_wq_grcdump(struct work_struct *work)
3530{
3531 struct qedf_ctx *qedf =
3532 container_of(work, struct qedf_ctx, grcdump_work.work);
3533
3534 QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
3535 qedf_capture_grc_dump(qedf);
3536}
3537
Dupuis, Chad61d86582017-02-15 06:28:23 -08003538/*
Chad Dupuis642a0b32018-05-22 00:28:43 -07003539 * Protocol TLV handler: fills in the FCoE TLV data that qed reports to
 * the management firmware on request.
3540 */
3541void qedf_get_protocol_tlv_data(void *dev, void *data)
3542{
3543 struct qedf_ctx *qedf = dev;
3544 struct qed_mfw_tlv_fcoe *fcoe = data;
3545 struct fc_lport *lport = qedf->lport;
3546 struct Scsi_Host *host = lport->host;
3547 struct fc_host_attrs *fc_host = shost_to_fc_host(host);
3548 struct fc_host_statistics *hst;
3549
3550 /* Force a refresh of the fc_host stats including offload stats */
3551 hst = qedf_fc_get_host_stats(host);
3552
3553 fcoe->qos_pri_set = true;
3554 fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
3555
3556 fcoe->ra_tov_set = true;
3557 fcoe->ra_tov = lport->r_a_tov;
3558
3559 fcoe->ed_tov_set = true;
3560 fcoe->ed_tov = lport->e_d_tov;
3561
3562 fcoe->npiv_state_set = true;
3563 fcoe->npiv_state = 1; /* NPIV always enabled */
3564
3565 fcoe->num_npiv_ids_set = true;
3566 fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
3567
3568 /* Certain attributes we only want to set if we've selected an FCF */
3569 if (qedf->ctlr.sel_fcf) {
3570 fcoe->switch_name_set = true;
3571 u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
3572 }
3573
3574 fcoe->port_state_set = true;
3575 /* For qedf we're either link down or fabric attach */
3576 if (lport->link_up)
3577 fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
3578 else
3579 fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
3580
3581 fcoe->link_failures_set = true;
3582 fcoe->link_failures = (u16)hst->link_failure_count;
3583
3584 fcoe->fcoe_txq_depth_set = true;
3585 fcoe->fcoe_rxq_depth_set = true;
3586 fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
3587 fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
3588
3589 fcoe->fcoe_rx_frames_set = true;
3590 fcoe->fcoe_rx_frames = hst->rx_frames;
3591
3592 fcoe->fcoe_tx_frames_set = true;
3593 fcoe->fcoe_tx_frames = hst->tx_frames;
3594
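	/*
	 * fc_host statistics count FCP traffic in megabytes, so the byte
	 * counts reported in the TLV are reconstructed at megabyte
	 * granularity.
	 */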
3595 fcoe->fcoe_rx_bytes_set = true;
3596 fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
3597
3598 fcoe->fcoe_tx_bytes_set = true;
3599 fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
3600
3601 fcoe->crc_count_set = true;
3602 fcoe->crc_count = hst->invalid_crc_count;
3603
3604 fcoe->tx_abts_set = true;
3605 fcoe->tx_abts = hst->fcp_packet_aborts;
3606
3607 fcoe->tx_lun_rst_set = true;
3608 fcoe->tx_lun_rst = qedf->lun_resets;
3609
3610 fcoe->abort_task_sets_set = true;
3611 fcoe->abort_task_sets = qedf->packet_aborts;
3612
3613 fcoe->scsi_busy_set = true;
3614 fcoe->scsi_busy = qedf->busy;
3615
3616 fcoe->scsi_tsk_full_set = true;
3617 fcoe->scsi_tsk_full = qedf->task_set_fulls;
3618}
3619
Chad Dupuis8673daf2018-05-22 00:28:44 -07003620/* Generic TLV data callback */
3621void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
3622{
3623 struct qedf_ctx *qedf;
3624
3625 if (!dev) {
3626 QEDF_INFO(NULL, QEDF_LOG_EVT,
3627 "dev is NULL so ignoring get_generic_tlv_data request.\n");
3628 return;
3629 }
3630 qedf = (struct qedf_ctx *)dev;
3631
3632 memset(data, 0, sizeof(struct qed_generic_tlvs));
3633 ether_addr_copy(data->mac[0], qedf->mac);
3634}
3635
Chad Dupuis642a0b32018-05-22 00:28:43 -07003636/*
Dupuis, Chad61d86582017-02-15 06:28:23 -08003637 * Module Init/Remove
3638 */
3639
3640static int __init qedf_init(void)
3641{
3642 int ret;
3643
3644 /* If debug=1 passed, set the default log mask */
3645 if (qedf_debug == QEDF_LOG_DEFAULT)
3646 qedf_debug = QEDF_DEFAULT_LOG_MASK;
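	/*
	 * Example (module load): "modprobe qedf debug=1" selects the
	 * default debug mask; any other value is treated as a raw
	 * QEDF_LOG_* bitmask.
	 */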
3647
Chad Dupuis84b2ba62018-04-25 06:08:52 -07003648 /*
3649 * Check that default prio for FIP/FCoE traffic is between 0..7 if a
3650 * value has been set
3651 */
3652 if (qedf_default_prio > -1)
3653 if (qedf_default_prio > 7) {
3654 qedf_default_prio = QEDF_DEFAULT_PRIO;
3655 QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
3656 QEDF_DEFAULT_PRIO);
3657 }
Chad Dupuisa93755c2018-04-25 06:08:50 -07003658
Dupuis, Chad61d86582017-02-15 06:28:23 -08003659 /* Print driver banner */
3660 QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
3661 QEDF_VERSION);
3662
3663 /* Create kmem_cache for qedf_io_work structs */
3664 qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
3665 sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
3666	if (!qedf_io_work_cache) {
3667		QEDF_ERR(NULL, "Failed to create qedf_io_work_cache.\n");
3668 goto err1;
3669 }
3670 QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
3671 qedf_io_work_cache);
3672
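	/*
	 * Bind to the qed core module's FCoE ops; the reference is
	 * dropped via qed_put_fcoe_ops() on unload or error.
	 */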
3673 qed_ops = qed_get_fcoe_ops();
3674 if (!qed_ops) {
3675 QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
3676 goto err1;
3677 }
3678
3679#ifdef CONFIG_DEBUG_FS
3680 qedf_dbg_init("qedf");
3681#endif
3682
3683 qedf_fc_transport_template =
3684 fc_attach_transport(&qedf_fc_transport_fn);
3685 if (!qedf_fc_transport_template) {
3686 QEDF_ERR(NULL, "Could not register with FC transport\n");
3687 goto err2;
3688 }
3689
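	/*
	 * A second transport template is registered for NPIV vports,
	 * which expose a different (typically reduced) set of transport
	 * operations than the physical port.
	 */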
3690 qedf_fc_vport_transport_template =
3691 fc_attach_transport(&qedf_fc_vport_transport_fn);
3692 if (!qedf_fc_vport_transport_template) {
3693 QEDF_ERR(NULL, "Could not register vport template with FC "
3694 "transport\n");
3695 goto err3;
3696 }
3697
3698 qedf_io_wq = create_workqueue("qedf_io_wq");
3699 if (!qedf_io_wq) {
3700 QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
3701 goto err4;
3702 }
3703
3704 qedf_cb_ops.get_login_failures = qedf_get_login_failures;
3705
3706 ret = pci_register_driver(&qedf_pci_driver);
3707 if (ret) {
3708 QEDF_ERR(NULL, "Failed to register driver\n");
3709 goto err5;
3710 }
3711
3712 return 0;
3713
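/* Error unwinding: release resources in the reverse order they were acquired. */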
3714err5:
3715 destroy_workqueue(qedf_io_wq);
3716err4:
3717 fc_release_transport(qedf_fc_vport_transport_template);
3718err3:
3719 fc_release_transport(qedf_fc_transport_template);
3720err2:
3721#ifdef CONFIG_DEBUG_FS
3722 qedf_dbg_exit();
3723#endif
3724 qed_put_fcoe_ops();
3725err1:
3726 return -EINVAL;
3727}
3728
3729static void __exit qedf_cleanup(void)
3730{
3731 pci_unregister_driver(&qedf_pci_driver);
3732
3733 destroy_workqueue(qedf_io_wq);
3734
3735 fc_release_transport(qedf_fc_vport_transport_template);
3736 fc_release_transport(qedf_fc_transport_template);
3737#ifdef CONFIG_DEBUG_FS
3738 qedf_dbg_exit();
3739#endif
3740 qed_put_fcoe_ops();
3741
3742 kmem_cache_destroy(qedf_io_work_cache);
3743}
3744
3745MODULE_LICENSE("GPL");
3746MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
3747MODULE_AUTHOR("QLogic Corporation");
3748MODULE_VERSION(QEDF_VERSION);
3749module_init(qedf_init);
3750module_exit(qedf_cleanup);