/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
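
/* Doorbell helpers: on BNXT_FLAG_CHIP_P5 chips the NQ/CQ doorbells are
 * written as a single 64-bit value built from db_key64, while older chips
 * use the legacy 32-bit CP doorbell formats defined above.
 */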
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}
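
/* TX length hint table, indexed by packet length in 512-byte units
 * (bnxt_start_xmit() shifts the length right by 9 before the lookup);
 * anything 2KB or larger uses TX_BD_FLAGS_LHINT_2048_AND_LARGER.
 */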
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
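
/* Main transmit routine.  A small packet (length <= tx_push_thresh) sent
 * on an otherwise empty ring is copied inline into the push buffer and
 * written straight through the doorbell ("push" mode); everything else is
 * DMA mapped and described with long TX BDs, with the extended BD carrying
 * LSO/checksum, VLAN and CFA action metadata.
 */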
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
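
/* TX completion handler: unmaps and frees up to @nr_pkts completed
 * packets, advances the consumer index, and re-wakes the TX queue if it
 * was stopped and enough descriptors have been freed.
 */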
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}
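
/* Allocate one RX buffer for the given producer slot.  In page mode
 * (BNXT_RX_PAGE_MODE, e.g. when XDP is in use) a whole page is mapped;
 * otherwise a kmalloc'ed data buffer suitable for build_skb() is used.
 */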
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
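
/* Recycle the aggregation buffers referenced by @agg_bufs completion
 * entries back onto the aggregation ring, e.g. when an error forces a
 * partially assembled packet to be dropped.
 */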
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}
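
/* Slow-path task scheduling: the PF queues its sp_task on the dedicated
 * bnxt_pf_wq workqueue, while VFs use the system workqueue.
 */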
static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}
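
/* TPA (hardware receive aggregation) start: the RX buffer at the
 * completion's consumer index becomes the aggregation buffer for this
 * agg_id and the previous TPA buffer is placed back on the RX ring.
 * Hash, metadata and header info from the start completion are saved in
 * tpa_info for use at TPA end.
 */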
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
			    cons, rxr->rx_next_cons);
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
			   u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}
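
/* Per-chip GRO fixup helpers, selected via bp->gro_func: they restore the
 * network/transport header offsets and TCP pseudo checksum of the
 * aggregated skb so that tcp_gro_complete() can be called on it.
 */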
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}
/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}
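
/* TPA end: the aggregated packet is complete.  Build an skb from the TPA
 * buffer (copying small packets, otherwise handing the buffer to
 * build_skb() and allocating a replacement), attach any aggregation
 * pages, and fill in protocol, RSS hash and metadata from the saved
 * tpa_info.
 */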
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1491 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04001492 u16 vlan_proto = tpa_info->metadata >>
1493 RX_CMP_FLAGS2_METADATA_TPID_SFT;
Michael Chaned7bc6022018-03-09 23:46:06 -05001494 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04001495
Michael Chan8852ddb2016-06-06 02:37:16 -04001496 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
Michael Chanc0c050c2015-10-22 16:01:17 -04001497 }
1498
1499 skb_checksum_none_assert(skb);
1500 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1501 skb->ip_summed = CHECKSUM_UNNECESSARY;
1502 skb->csum_level =
1503 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1504 }
1505
1506 if (TPA_END_GRO(tpa_end))
Michael Chan309369c2016-06-13 02:25:34 -04001507 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
Michael Chanc0c050c2015-10-22 16:01:17 -04001508
1509 return skb;
1510}
1511
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001512static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1513 struct sk_buff *skb)
1514{
1515 if (skb->dev != bp->dev) {
1516 /* this packet belongs to a vf-rep */
1517 bnxt_vf_rep_rx(bp, skb);
1518 return;
1519 }
1520 skb_record_rx_queue(skb, bnapi->index);
1521 napi_gro_receive(&bnapi->napi, skb);
1522}
1523
Michael Chanc0c050c2015-10-22 16:01:17 -04001524/* returns the following:
1525 * 1 - 1 packet successfully received
1526 * 0 - successful TPA_START, packet not completed yet
1527 * -EBUSY - completion ring does not have all the agg buffers yet
1528 * -ENOMEM - packet aborted due to out of memory
1529 * -EIO - packet aborted due to hw error indicated in BD
1530 */
Michael Chane44758b2018-10-14 07:02:55 -04001531static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1532 u32 *raw_cons, u8 *event)
Michael Chanc0c050c2015-10-22 16:01:17 -04001533{
Michael Chane44758b2018-10-14 07:02:55 -04001534 struct bnxt_napi *bnapi = cpr->bnapi;
Michael Chanb6ab4b02016-01-02 23:44:59 -05001535 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04001536 struct net_device *dev = bp->dev;
1537 struct rx_cmp *rxcmp;
1538 struct rx_cmp_ext *rxcmp1;
1539 u32 tmp_raw_cons = *raw_cons;
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001540 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04001541 struct bnxt_sw_rx_bd *rx_buf;
1542 unsigned int len;
Michael Chan6bb19472017-02-06 16:55:32 -05001543 u8 *data_ptr, agg_bufs, cmp_type;
Michael Chanc0c050c2015-10-22 16:01:17 -04001544 dma_addr_t dma_addr;
1545 struct sk_buff *skb;
Michael Chan6bb19472017-02-06 16:55:32 -05001546 void *data;
Michael Chanc0c050c2015-10-22 16:01:17 -04001547 int rc = 0;
Michael Chanc61fb992017-02-06 16:55:36 -05001548 u32 misc;
Michael Chanc0c050c2015-10-22 16:01:17 -04001549
1550 rxcmp = (struct rx_cmp *)
1551 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1552
1553 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1554 cp_cons = RING_CMP(tmp_raw_cons);
1555 rxcmp1 = (struct rx_cmp_ext *)
1556 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1557
1558 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1559 return -EBUSY;
1560
1561 cmp_type = RX_CMP_TYPE(rxcmp);
1562
1563 prod = rxr->rx_prod;
1564
1565 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1566 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1567 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1568
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001569 *event |= BNXT_RX_EVENT;
Colin Ian Kinge7e70fa2018-01-16 10:22:50 +00001570 goto next_rx_no_prod_no_len;
Michael Chanc0c050c2015-10-22 16:01:17 -04001571
1572 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
Michael Chane44758b2018-10-14 07:02:55 -04001573 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
Michael Chanc0c050c2015-10-22 16:01:17 -04001574 (struct rx_tpa_end_cmp *)rxcmp,
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001575 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
Michael Chanc0c050c2015-10-22 16:01:17 -04001576
Tobias Klauser1fac4b22017-09-26 15:12:26 +02001577 if (IS_ERR(skb))
Michael Chanc0c050c2015-10-22 16:01:17 -04001578 return -EBUSY;
1579
1580 rc = -ENOMEM;
1581 if (likely(skb)) {
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001582 bnxt_deliver_skb(bp, bnapi, skb);
Michael Chanc0c050c2015-10-22 16:01:17 -04001583 rc = 1;
1584 }
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001585 *event |= BNXT_RX_EVENT;
Colin Ian Kinge7e70fa2018-01-16 10:22:50 +00001586 goto next_rx_no_prod_no_len;
Michael Chanc0c050c2015-10-22 16:01:17 -04001587 }
1588
1589 cons = rxcmp->rx_cmp_opaque;
Michael Chanfa7e2812016-05-10 19:18:00 -04001590 if (unlikely(cons != rxr->rx_next_cons)) {
Michael Chane44758b2018-10-14 07:02:55 -04001591 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
Michael Chanfa7e2812016-05-10 19:18:00 -04001592
Michael Chana1b0e4e2019-04-08 17:39:54 -04001593 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1594 cons, rxr->rx_next_cons);
Michael Chanfa7e2812016-05-10 19:18:00 -04001595 bnxt_sched_reset(bp, rxr);
1596 return rc1;
1597 }
Michael Chana1b0e4e2019-04-08 17:39:54 -04001598 rx_buf = &rxr->rx_buf_ring[cons];
1599 data = rx_buf->data;
1600 data_ptr = rx_buf->data_ptr;
Michael Chan6bb19472017-02-06 16:55:32 -05001601 prefetch(data_ptr);
Michael Chanc0c050c2015-10-22 16:01:17 -04001602
Michael Chanc61fb992017-02-06 16:55:36 -05001603 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1604 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001605
1606 if (agg_bufs) {
1607 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1608 return -EBUSY;
1609
1610 cp_cons = NEXT_CMP(cp_cons);
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001611 *event |= BNXT_AGG_EVENT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001612 }
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001613 *event |= BNXT_RX_EVENT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001614
1615 rx_buf->data = NULL;
1616 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
Michael Chan8e44e962019-04-08 17:39:55 -04001617 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1618
Michael Chanc0c050c2015-10-22 16:01:17 -04001619 bnxt_reuse_rx_data(rxr, cons, data);
1620 if (agg_bufs)
Michael Chane44758b2018-10-14 07:02:55 -04001621 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
Michael Chanc0c050c2015-10-22 16:01:17 -04001622
1623 rc = -EIO;
Michael Chan8e44e962019-04-08 17:39:55 -04001624 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1625 netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1626 bnxt_sched_reset(bp, rxr);
1627 }
Michael Chan0b397b12019-04-25 22:31:55 -04001628 goto next_rx_no_len;
Michael Chanc0c050c2015-10-22 16:01:17 -04001629 }
1630
1631 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
Michael Chan11cd1192017-02-06 16:55:33 -05001632 dma_addr = rx_buf->mapping;
Michael Chanc0c050c2015-10-22 16:01:17 -04001633
Michael Chanc6d30e82017-02-06 16:55:42 -05001634 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1635 rc = 1;
1636 goto next_rx;
1637 }
1638
Michael Chanc0c050c2015-10-22 16:01:17 -04001639 if (len <= bp->rx_copy_thresh) {
Michael Chan6bb19472017-02-06 16:55:32 -05001640 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
Michael Chanc0c050c2015-10-22 16:01:17 -04001641 bnxt_reuse_rx_data(rxr, cons, data);
1642 if (!skb) {
1643 rc = -ENOMEM;
1644 goto next_rx;
1645 }
1646 } else {
Michael Chanc61fb992017-02-06 16:55:36 -05001647 u32 payload;
1648
Michael Chanc6d30e82017-02-06 16:55:42 -05001649 if (rx_buf->data_ptr == data_ptr)
1650 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1651 else
1652 payload = 0;
Michael Chan6bb19472017-02-06 16:55:32 -05001653 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
Michael Chanc61fb992017-02-06 16:55:36 -05001654 payload | len);
Michael Chanc0c050c2015-10-22 16:01:17 -04001655 if (!skb) {
1656 rc = -ENOMEM;
1657 goto next_rx;
1658 }
1659 }
1660
1661 if (agg_bufs) {
Michael Chane44758b2018-10-14 07:02:55 -04001662 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
Michael Chanc0c050c2015-10-22 16:01:17 -04001663 if (!skb) {
1664 rc = -ENOMEM;
1665 goto next_rx;
1666 }
1667 }
1668
1669 if (RX_CMP_HASH_VALID(rxcmp)) {
1670 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1671 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1672
1673 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1674 if (hash_type != 1 && hash_type != 3)
1675 type = PKT_HASH_TYPE_L3;
1676 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1677 }
1678
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001679 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1680 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
Michael Chanc0c050c2015-10-22 16:01:17 -04001681
Michael Chan8852ddb2016-06-06 02:37:16 -04001682 if ((rxcmp1->rx_cmp_flags2 &
1683 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1684 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04001685 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
Michael Chaned7bc6022018-03-09 23:46:06 -05001686 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04001687 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1688
Michael Chan8852ddb2016-06-06 02:37:16 -04001689 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
Michael Chanc0c050c2015-10-22 16:01:17 -04001690 }
1691
1692 skb_checksum_none_assert(skb);
1693 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1694 if (dev->features & NETIF_F_RXCSUM) {
1695 skb->ip_summed = CHECKSUM_UNNECESSARY;
1696 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1697 }
1698 } else {
Satish Baddipadige665e3502015-12-27 18:19:21 -05001699 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1700 if (dev->features & NETIF_F_RXCSUM)
Michael Chand1981922018-11-15 03:25:38 -05001701 bnapi->cp_ring.rx_l4_csum_errors++;
Satish Baddipadige665e3502015-12-27 18:19:21 -05001702 }
Michael Chanc0c050c2015-10-22 16:01:17 -04001703 }
1704
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001705 bnxt_deliver_skb(bp, bnapi, skb);
Michael Chanc0c050c2015-10-22 16:01:17 -04001706 rc = 1;
1707
1708next_rx:
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05001709 cpr->rx_packets += 1;
1710 cpr->rx_bytes += len;
Colin Ian Kinge7e70fa2018-01-16 10:22:50 +00001711
Michael Chan0b397b12019-04-25 22:31:55 -04001712next_rx_no_len:
1713 rxr->rx_prod = NEXT_RX(prod);
1714 rxr->rx_next_cons = NEXT_RX(cons);
1715
Colin Ian Kinge7e70fa2018-01-16 10:22:50 +00001716next_rx_no_prod_no_len:
Michael Chanc0c050c2015-10-22 16:01:17 -04001717 *raw_cons = tmp_raw_cons;
1718
1719 return rc;
1720}
1721
Michael Chan2270bc52017-06-23 14:01:01 -04001722/* In netpoll mode, if we are using a combined completion ring, we need to
1723 * discard the rx packets and recycle the buffers.
1724 */
Michael Chane44758b2018-10-14 07:02:55 -04001725static int bnxt_force_rx_discard(struct bnxt *bp,
1726 struct bnxt_cp_ring_info *cpr,
Michael Chan2270bc52017-06-23 14:01:01 -04001727 u32 *raw_cons, u8 *event)
1728{
Michael Chan2270bc52017-06-23 14:01:01 -04001729 u32 tmp_raw_cons = *raw_cons;
1730 struct rx_cmp_ext *rxcmp1;
1731 struct rx_cmp *rxcmp;
1732 u16 cp_cons;
1733 u8 cmp_type;
1734
1735 cp_cons = RING_CMP(tmp_raw_cons);
1736 rxcmp = (struct rx_cmp *)
1737 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1738
1739 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1740 cp_cons = RING_CMP(tmp_raw_cons);
1741 rxcmp1 = (struct rx_cmp_ext *)
1742 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1743
1744 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1745 return -EBUSY;
1746
1747 cmp_type = RX_CMP_TYPE(rxcmp);
1748 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1749 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1750 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1751 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1752 struct rx_tpa_end_cmp_ext *tpa_end1;
1753
1754 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1755 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1756 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1757 }
Michael Chane44758b2018-10-14 07:02:55 -04001758 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
Michael Chan2270bc52017-06-23 14:01:01 -04001759}
1760
Michael Chan4bb13ab2016-04-05 14:09:01 -04001761#define BNXT_GET_EVENT_PORT(data) \
Michael Chan87c374d2016-12-02 21:17:16 -05001762 ((data) & \
1763 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
Michael Chan4bb13ab2016-04-05 14:09:01 -04001764
Michael Chanc0c050c2015-10-22 16:01:17 -04001765static int bnxt_async_event_process(struct bnxt *bp,
1766 struct hwrm_async_event_cmpl *cmpl)
1767{
1768 u16 event_id = le16_to_cpu(cmpl->event_id);
1769
1770	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1771 switch (event_id) {
Michael Chan87c374d2016-12-02 21:17:16 -05001772 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
Michael Chan8cbde112016-04-11 04:11:14 -04001773 u32 data1 = le32_to_cpu(cmpl->event_data1);
1774 struct bnxt_link_info *link_info = &bp->link_info;
1775
1776 if (BNXT_VF(bp))
1777 goto async_event_process_exit;
Michael Chana8168b62017-12-06 17:31:22 -05001778
1779 /* print unsupported speed warning in forced speed mode only */
1780 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1781 (data1 & 0x20000)) {
Michael Chan8cbde112016-04-11 04:11:14 -04001782 u16 fw_speed = link_info->force_link_speed;
1783 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1784
Michael Chana8168b62017-12-06 17:31:22 -05001785 if (speed != SPEED_UNKNOWN)
1786 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1787 speed);
Michael Chan8cbde112016-04-11 04:11:14 -04001788 }
Michael Chan286ef9d2016-11-16 21:13:08 -05001789 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
Michael Chan8cbde112016-04-11 04:11:14 -04001790 }
Gustavo A. R. Silvabc171e82018-08-07 18:11:14 -05001791 /* fall through */
Michael Chan87c374d2016-12-02 21:17:16 -05001792 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
Michael Chanc0c050c2015-10-22 16:01:17 -04001793 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
Jeffrey Huang19241362016-02-26 04:00:00 -05001794 break;
Michael Chan87c374d2016-12-02 21:17:16 -05001795 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
Jeffrey Huang19241362016-02-26 04:00:00 -05001796 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
Michael Chanc0c050c2015-10-22 16:01:17 -04001797 break;
Michael Chan87c374d2016-12-02 21:17:16 -05001798 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
Michael Chan4bb13ab2016-04-05 14:09:01 -04001799 u32 data1 = le32_to_cpu(cmpl->event_data1);
1800 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1801
1802 if (BNXT_VF(bp))
1803 break;
1804
1805 if (bp->pf.port_id != port_id)
1806 break;
1807
Michael Chan4bb13ab2016-04-05 14:09:01 -04001808 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1809 break;
1810 }
Michael Chan87c374d2016-12-02 21:17:16 -05001811 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
Michael Chanfc0f1922016-06-13 02:25:30 -04001812 if (BNXT_PF(bp))
1813 goto async_event_process_exit;
1814 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1815 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001816 default:
Jeffrey Huang19241362016-02-26 04:00:00 -05001817 goto async_event_process_exit;
Michael Chanc0c050c2015-10-22 16:01:17 -04001818 }
Michael Chanc213eae2017-10-13 21:09:29 -04001819 bnxt_queue_sp_work(bp);
Jeffrey Huang19241362016-02-26 04:00:00 -05001820async_event_process_exit:
Michael Chana588e452016-12-07 00:26:21 -05001821 bnxt_ulp_async_events(bp, cmpl);
Michael Chanc0c050c2015-10-22 16:01:17 -04001822 return 0;
1823}
1824
1825static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1826{
1827 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1828 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1829 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1830 (struct hwrm_fwd_req_cmpl *)txcmp;
1831
1832 switch (cmpl_type) {
1833 case CMPL_BASE_TYPE_HWRM_DONE:
1834 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1835 if (seq_id == bp->hwrm_intr_seq_id)
Venkat Duvvurufc718bb2018-12-20 03:38:44 -05001836 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04001837 else
1838 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1839 break;
1840
1841 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1842 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1843
1844 if ((vf_id < bp->pf.first_vf_id) ||
1845 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1846 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1847 vf_id);
1848 return -EINVAL;
1849 }
1850
1851 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1852 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04001853 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04001854 break;
1855
1856 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1857 bnxt_async_event_process(bp,
1858 (struct hwrm_async_event_cmpl *)txcmp);
1859
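	/* fall through */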
1860 default:
1861 break;
1862 }
1863
1864 return 0;
1865}
1866
1867static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1868{
1869 struct bnxt_napi *bnapi = dev_instance;
1870 struct bnxt *bp = bnapi->bp;
1871 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1872 u32 cons = RING_CMP(cpr->cp_raw_cons);
1873
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05001874 cpr->event_ctr++;
Michael Chanc0c050c2015-10-22 16:01:17 -04001875 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1876 napi_schedule(&bnapi->napi);
1877 return IRQ_HANDLED;
1878}
1879
1880static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1881{
1882 u32 raw_cons = cpr->cp_raw_cons;
1883 u16 cons = RING_CMP(raw_cons);
1884 struct tx_cmp *txcmp;
1885
1886 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1887
1888 return TX_CMP_VALID(txcmp, raw_cons);
1889}
1890
Michael Chanc0c050c2015-10-22 16:01:17 -04001891static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1892{
1893 struct bnxt_napi *bnapi = dev_instance;
1894 struct bnxt *bp = bnapi->bp;
1895 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1896 u32 cons = RING_CMP(cpr->cp_raw_cons);
1897 u32 int_status;
1898
1899 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1900
1901 if (!bnxt_has_work(bp, cpr)) {
Jeffrey Huang11809492015-11-05 16:25:49 -05001902 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
Michael Chanc0c050c2015-10-22 16:01:17 -04001903 /* return if erroneous interrupt */
1904 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1905 return IRQ_NONE;
1906 }
1907
1908 /* disable ring IRQ */
Michael Chan697197e2018-10-14 07:02:46 -04001909 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
Michael Chanc0c050c2015-10-22 16:01:17 -04001910
1911 /* Return here if interrupt is shared and is disabled. */
1912 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1913 return IRQ_HANDLED;
1914
1915 napi_schedule(&bnapi->napi);
1916 return IRQ_HANDLED;
1917}
1918
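/* Editorial note: the completion ring consumer is tracked as a "raw" index
 * (raw_cons) that is wider than the ring itself.  RING_CMP() masks it down
 * to a ring offset, while the extra bit flips each time the ring wraps;
 * TX_CMP_VALID()/RX_CMP_VALID() compare that phase against the valid bit
 * the hardware writes into each entry, which is why the validity test and
 * the dma_rmb() below must happen before any other descriptor field is
 * read.
 */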
Michael Chan3675b922018-10-14 07:02:57 -04001919static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1920 int budget)
Michael Chanc0c050c2015-10-22 16:01:17 -04001921{
Michael Chane44758b2018-10-14 07:02:55 -04001922 struct bnxt_napi *bnapi = cpr->bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04001923 u32 raw_cons = cpr->cp_raw_cons;
1924 u32 cons;
1925 int tx_pkts = 0;
1926 int rx_pkts = 0;
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001927 u8 event = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04001928 struct tx_cmp *txcmp;
1929
Michael Chan0fcec982018-10-14 07:02:58 -04001930 cpr->has_more_work = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04001931 while (1) {
1932 int rc;
1933
1934 cons = RING_CMP(raw_cons);
1935 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1936
1937 if (!TX_CMP_VALID(txcmp, raw_cons))
1938 break;
1939
Michael Chan67a95e22016-05-04 16:56:43 -04001940 /* The valid test of the entry must be done first before
1941 * reading any further.
1942 */
Michael Chanb67daab2016-05-15 03:04:51 -04001943 dma_rmb();
Michael Chan3675b922018-10-14 07:02:57 -04001944 cpr->had_work_done = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04001945 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1946 tx_pkts++;
1947 /* return full budget so NAPI will complete. */
Michael Chan73f21c62018-09-26 00:41:04 -04001948 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04001949 rx_pkts = budget;
Michael Chan73f21c62018-09-26 00:41:04 -04001950 raw_cons = NEXT_RAW_CMP(raw_cons);
Michael Chan0fcec982018-10-14 07:02:58 -04001951 if (budget)
1952 cpr->has_more_work = 1;
Michael Chan73f21c62018-09-26 00:41:04 -04001953 break;
1954 }
Michael Chanc0c050c2015-10-22 16:01:17 -04001955 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
Michael Chan2270bc52017-06-23 14:01:01 -04001956 if (likely(budget))
Michael Chane44758b2018-10-14 07:02:55 -04001957 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
Michael Chan2270bc52017-06-23 14:01:01 -04001958 else
Michael Chane44758b2018-10-14 07:02:55 -04001959 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
Michael Chan2270bc52017-06-23 14:01:01 -04001960 &event);
Michael Chanc0c050c2015-10-22 16:01:17 -04001961 if (likely(rc >= 0))
1962 rx_pkts += rc;
Michael Chan903649e2017-08-28 13:40:30 -04001963 /* Increment rx_pkts when rc is -ENOMEM to count towards
1964 * the NAPI budget. Otherwise, we may potentially loop
1965 * here forever if we consistently cannot allocate
1966 * buffers.
1967 */
Calvin Owens2edbdb32017-12-08 09:05:26 -08001968 else if (rc == -ENOMEM && budget)
Michael Chan903649e2017-08-28 13:40:30 -04001969 rx_pkts++;
Michael Chanc0c050c2015-10-22 16:01:17 -04001970 else if (rc == -EBUSY) /* partial completion */
1971 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001972 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1973 CMPL_BASE_TYPE_HWRM_DONE) ||
1974 (TX_CMP_TYPE(txcmp) ==
1975 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1976 (TX_CMP_TYPE(txcmp) ==
1977 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1978 bnxt_hwrm_handler(bp, txcmp);
1979 }
1980 raw_cons = NEXT_RAW_CMP(raw_cons);
1981
Michael Chan0fcec982018-10-14 07:02:58 -04001982 if (rx_pkts && rx_pkts == budget) {
1983 cpr->has_more_work = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04001984 break;
Michael Chan0fcec982018-10-14 07:02:58 -04001985 }
Michael Chanc0c050c2015-10-22 16:01:17 -04001986 }
1987
Michael Chan38413402017-02-06 16:55:43 -05001988 if (event & BNXT_TX_EVENT) {
1989 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan38413402017-02-06 16:55:43 -05001990 u16 prod = txr->tx_prod;
1991
1992 /* Sync BD data before updating doorbell */
1993 wmb();
1994
Michael Chan697197e2018-10-14 07:02:46 -04001995 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
Michael Chan38413402017-02-06 16:55:43 -05001996 }
1997
Michael Chanc0c050c2015-10-22 16:01:17 -04001998 cpr->cp_raw_cons = raw_cons;
Michael Chan3675b922018-10-14 07:02:57 -04001999 bnapi->tx_pkts += tx_pkts;
2000 bnapi->events |= event;
2001 return rx_pkts;
2002}
2003
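/* Editorial note: __bnxt_poll_work() above only accumulates bnapi->tx_pkts
 * and the event flags; TX completion processing and the RX/AGG producer
 * doorbell writes are deferred to this helper, so a NAPI poll issues at
 * most one doorbell write per ring rather than one per processed packet.
 */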
2004static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2005{
2006 if (bnapi->tx_pkts) {
2007 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2008 bnapi->tx_pkts = 0;
2009 }
2010
2011 if (bnapi->events & BNXT_RX_EVENT) {
2012 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2013
2014 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2015 if (bnapi->events & BNXT_AGG_EVENT)
2016 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2017 }
2018 bnapi->events = 0;
2019}
2020
2021static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2022 int budget)
2023{
2024 struct bnxt_napi *bnapi = cpr->bnapi;
2025 int rx_pkts;
2026
2027 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2028
Michael Chanc0c050c2015-10-22 16:01:17 -04002029 /* ACK completion ring before freeing tx ring and producing new
2030 * buffers in rx/agg rings to prevent overflowing the completion
2031 * ring.
2032 */
Michael Chan697197e2018-10-14 07:02:46 -04002033 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04002034
Michael Chan3675b922018-10-14 07:02:57 -04002035 __bnxt_poll_work_done(bp, bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04002036 return rx_pkts;
2037}
2038
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04002039static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2040{
2041 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2042 struct bnxt *bp = bnapi->bp;
2043 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2044 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2045 struct tx_cmp *txcmp;
2046 struct rx_cmp_ext *rxcmp1;
2047 u32 cp_cons, tmp_raw_cons;
2048 u32 raw_cons = cpr->cp_raw_cons;
2049 u32 rx_pkts = 0;
Michael Chan4e5dbbda2017-02-06 16:55:37 -05002050 u8 event = 0;
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04002051
2052 while (1) {
2053 int rc;
2054
2055 cp_cons = RING_CMP(raw_cons);
2056 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2057
2058 if (!TX_CMP_VALID(txcmp, raw_cons))
2059 break;
2060
2061 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2062 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2063 cp_cons = RING_CMP(tmp_raw_cons);
2064 rxcmp1 = (struct rx_cmp_ext *)
2065 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2066
2067 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2068 break;
2069
2070 /* force an error to recycle the buffer */
2071 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2072 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2073
Michael Chane44758b2018-10-14 07:02:55 -04002074 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
Calvin Owens2edbdb32017-12-08 09:05:26 -08002075 if (likely(rc == -EIO) && budget)
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04002076 rx_pkts++;
2077 else if (rc == -EBUSY) /* partial completion */
2078 break;
2079 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2080 CMPL_BASE_TYPE_HWRM_DONE)) {
2081 bnxt_hwrm_handler(bp, txcmp);
2082 } else {
2083 netdev_err(bp->dev,
2084 "Invalid completion received on special ring\n");
2085 }
2086 raw_cons = NEXT_RAW_CMP(raw_cons);
2087
2088 if (rx_pkts == budget)
2089 break;
2090 }
2091
2092 cpr->cp_raw_cons = raw_cons;
Michael Chan697197e2018-10-14 07:02:46 -04002093 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2094 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04002095
Michael Chan434c9752017-05-29 19:06:08 -04002096 if (event & BNXT_AGG_EVENT)
Michael Chan697197e2018-10-14 07:02:46 -04002097 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04002098
2099 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
Eric Dumazet6ad20162017-01-30 08:22:01 -08002100 napi_complete_done(napi, rx_pkts);
Michael Chan697197e2018-10-14 07:02:46 -04002101 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04002102 }
2103 return rx_pkts;
2104}
2105
Michael Chanc0c050c2015-10-22 16:01:17 -04002106static int bnxt_poll(struct napi_struct *napi, int budget)
2107{
2108 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2109 struct bnxt *bp = bnapi->bp;
2110 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2111 int work_done = 0;
2112
Michael Chanc0c050c2015-10-22 16:01:17 -04002113 while (1) {
Michael Chane44758b2018-10-14 07:02:55 -04002114 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
Michael Chanc0c050c2015-10-22 16:01:17 -04002115
Michael Chan73f21c62018-09-26 00:41:04 -04002116 if (work_done >= budget) {
2117 if (!budget)
Michael Chan697197e2018-10-14 07:02:46 -04002118 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04002119 break;
Michael Chan73f21c62018-09-26 00:41:04 -04002120 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002121
2122 if (!bnxt_has_work(bp, cpr)) {
Michael Chane7b95692016-12-29 12:13:32 -05002123 if (napi_complete_done(napi, work_done))
Michael Chan697197e2018-10-14 07:02:46 -04002124 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04002125 break;
2126 }
2127 }
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05002128 if (bp->flags & BNXT_FLAG_DIM) {
2129 struct net_dim_sample dim_sample;
2130
2131 net_dim_sample(cpr->event_ctr,
2132 cpr->rx_packets,
2133 cpr->rx_bytes,
2134 &dim_sample);
2135 net_dim(&cpr->dim, dim_sample);
2136 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002137 mmiowb();
Michael Chanc0c050c2015-10-22 16:01:17 -04002138 return work_done;
2139}
2140
Michael Chan0fcec982018-10-14 07:02:58 -04002141static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2142{
2143 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2144 int i, work_done = 0;
2145
2146 for (i = 0; i < 2; i++) {
2147 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2148
2149 if (cpr2) {
2150 work_done += __bnxt_poll_work(bp, cpr2,
2151 budget - work_done);
2152 cpr->has_more_work |= cpr2->has_more_work;
2153 }
2154 }
2155 return work_done;
2156}
2157
2158static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2159 u64 dbr_type, bool all)
2160{
2161 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2162 int i;
2163
2164 for (i = 0; i < 2; i++) {
2165 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2166 struct bnxt_db_info *db;
2167
2168 if (cpr2 && (all || cpr2->had_work_done)) {
2169 db = &cpr2->cp_db;
2170 writeq(db->db_key64 | dbr_type |
2171 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2172 cpr2->had_work_done = 0;
2173 }
2174 }
2175 __bnxt_poll_work_done(bp, bnapi);
2176}
2177
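/* Editorial note: on P5 (57500-series) chips the vector services a
 * notification queue (NQ) rather than a completion ring.  Each NQ entry of
 * type CQ_NOTIFICATION carries a cq_handle that indexes cp_ring_arr[], and
 * the referenced RX or TX completion ring is then drained with
 * __bnxt_poll_work(); the NQ and CQ doorbells are re-armed separately.
 */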
2178static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2179{
2180 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2181 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2182 u32 raw_cons = cpr->cp_raw_cons;
2183 struct bnxt *bp = bnapi->bp;
2184 struct nqe_cn *nqcmp;
2185 int work_done = 0;
2186 u32 cons;
2187
2188 if (cpr->has_more_work) {
2189 cpr->has_more_work = 0;
2190 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2191 if (cpr->has_more_work) {
2192 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2193 return work_done;
2194 }
2195 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2196 if (napi_complete_done(napi, work_done))
2197 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2198 return work_done;
2199 }
2200 while (1) {
2201 cons = RING_CMP(raw_cons);
2202 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2203
2204 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2205 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2206 false);
2207 cpr->cp_raw_cons = raw_cons;
2208 if (napi_complete_done(napi, work_done))
2209 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2210 cpr->cp_raw_cons);
2211 return work_done;
2212 }
2213
2214 /* The valid test of the entry must be done first before
2215 * reading any further.
2216 */
2217 dma_rmb();
2218
2219 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2220 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2221 struct bnxt_cp_ring_info *cpr2;
2222
2223 cpr2 = cpr->cp_ring_arr[idx];
2224 work_done += __bnxt_poll_work(bp, cpr2,
2225 budget - work_done);
2226 cpr->has_more_work = cpr2->has_more_work;
2227 } else {
2228 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2229 }
2230 raw_cons = NEXT_RAW_CMP(raw_cons);
2231 if (cpr->has_more_work)
2232 break;
2233 }
2234 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2235 cpr->cp_raw_cons = raw_cons;
2236 return work_done;
2237}
2238
Michael Chanc0c050c2015-10-22 16:01:17 -04002239static void bnxt_free_tx_skbs(struct bnxt *bp)
2240{
2241 int i, max_idx;
2242 struct pci_dev *pdev = bp->pdev;
2243
Michael Chanb6ab4b02016-01-02 23:44:59 -05002244 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002245 return;
2246
2247 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2248 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002249 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002250 int j;
2251
Michael Chanc0c050c2015-10-22 16:01:17 -04002252 for (j = 0; j < max_idx;) {
2253 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2254 struct sk_buff *skb = tx_buf->skb;
2255 int k, last;
2256
2257 if (!skb) {
2258 j++;
2259 continue;
2260 }
2261
2262 tx_buf->skb = NULL;
2263
2264 if (tx_buf->is_push) {
2265 dev_kfree_skb(skb);
2266 j += 2;
2267 continue;
2268 }
2269
2270 dma_unmap_single(&pdev->dev,
2271 dma_unmap_addr(tx_buf, mapping),
2272 skb_headlen(skb),
2273 PCI_DMA_TODEVICE);
2274
2275 last = tx_buf->nr_frags;
2276 j += 2;
Michael Chand612a572016-01-28 03:11:22 -05002277 for (k = 0; k < last; k++, j++) {
2278 int ring_idx = j & bp->tx_ring_mask;
Michael Chanc0c050c2015-10-22 16:01:17 -04002279 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2280
Michael Chand612a572016-01-28 03:11:22 -05002281 tx_buf = &txr->tx_buf_ring[ring_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04002282 dma_unmap_page(
2283 &pdev->dev,
2284 dma_unmap_addr(tx_buf, mapping),
2285 skb_frag_size(frag), PCI_DMA_TODEVICE);
2286 }
2287 dev_kfree_skb(skb);
2288 }
2289 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2290 }
2291}
2292
2293static void bnxt_free_rx_skbs(struct bnxt *bp)
2294{
2295 int i, max_idx, max_agg_idx;
2296 struct pci_dev *pdev = bp->pdev;
2297
Michael Chanb6ab4b02016-01-02 23:44:59 -05002298 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002299 return;
2300
2301 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2302 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2303 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002304 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002305 int j;
2306
Michael Chanc0c050c2015-10-22 16:01:17 -04002307 if (rxr->rx_tpa) {
2308 for (j = 0; j < MAX_TPA; j++) {
2309 struct bnxt_tpa_info *tpa_info =
2310 &rxr->rx_tpa[j];
2311 u8 *data = tpa_info->data;
2312
2313 if (!data)
2314 continue;
2315
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002316 dma_unmap_single_attrs(&pdev->dev,
2317 tpa_info->mapping,
2318 bp->rx_buf_use_size,
2319 bp->rx_dir,
2320 DMA_ATTR_WEAK_ORDERING);
Michael Chanc0c050c2015-10-22 16:01:17 -04002321
2322 tpa_info->data = NULL;
2323
2324 kfree(data);
2325 }
2326 }
2327
2328 for (j = 0; j < max_idx; j++) {
2329 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
Michael Chan3ed3a832017-03-28 19:47:31 -04002330 dma_addr_t mapping = rx_buf->mapping;
Michael Chan6bb19472017-02-06 16:55:32 -05002331 void *data = rx_buf->data;
Michael Chanc0c050c2015-10-22 16:01:17 -04002332
2333 if (!data)
2334 continue;
2335
Michael Chanc0c050c2015-10-22 16:01:17 -04002336 rx_buf->data = NULL;
2337
Michael Chan3ed3a832017-03-28 19:47:31 -04002338 if (BNXT_RX_PAGE_MODE(bp)) {
2339 mapping -= bp->rx_dma_offset;
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002340 dma_unmap_page_attrs(&pdev->dev, mapping,
2341 PAGE_SIZE, bp->rx_dir,
2342 DMA_ATTR_WEAK_ORDERING);
Michael Chanc61fb992017-02-06 16:55:36 -05002343 __free_page(data);
Michael Chan3ed3a832017-03-28 19:47:31 -04002344 } else {
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002345 dma_unmap_single_attrs(&pdev->dev, mapping,
2346 bp->rx_buf_use_size,
2347 bp->rx_dir,
2348 DMA_ATTR_WEAK_ORDERING);
Michael Chanc61fb992017-02-06 16:55:36 -05002349 kfree(data);
Michael Chan3ed3a832017-03-28 19:47:31 -04002350 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002351 }
2352
2353 for (j = 0; j < max_agg_idx; j++) {
2354 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2355 &rxr->rx_agg_ring[j];
2356 struct page *page = rx_agg_buf->page;
2357
2358 if (!page)
2359 continue;
2360
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002361 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2362 BNXT_RX_PAGE_SIZE,
2363 PCI_DMA_FROMDEVICE,
2364 DMA_ATTR_WEAK_ORDERING);
Michael Chanc0c050c2015-10-22 16:01:17 -04002365
2366 rx_agg_buf->page = NULL;
2367 __clear_bit(j, rxr->rx_agg_bmap);
2368
2369 __free_page(page);
2370 }
Michael Chan89d0a062016-04-25 02:30:51 -04002371 if (rxr->rx_page) {
2372 __free_page(rxr->rx_page);
2373 rxr->rx_page = NULL;
2374 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002375 }
2376}
2377
2378static void bnxt_free_skbs(struct bnxt *bp)
2379{
2380 bnxt_free_tx_skbs(bp);
2381 bnxt_free_rx_skbs(bp);
2382}
2383
Michael Chan6fe19882018-10-14 07:02:41 -04002384static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
Michael Chanc0c050c2015-10-22 16:01:17 -04002385{
2386 struct pci_dev *pdev = bp->pdev;
2387 int i;
2388
Michael Chan6fe19882018-10-14 07:02:41 -04002389 for (i = 0; i < rmem->nr_pages; i++) {
2390 if (!rmem->pg_arr[i])
Michael Chanc0c050c2015-10-22 16:01:17 -04002391 continue;
2392
Michael Chan6fe19882018-10-14 07:02:41 -04002393 dma_free_coherent(&pdev->dev, rmem->page_size,
2394 rmem->pg_arr[i], rmem->dma_arr[i]);
Michael Chanc0c050c2015-10-22 16:01:17 -04002395
Michael Chan6fe19882018-10-14 07:02:41 -04002396 rmem->pg_arr[i] = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04002397 }
Michael Chan6fe19882018-10-14 07:02:41 -04002398 if (rmem->pg_tbl) {
Michael Chan4f49b2b2018-12-20 03:38:49 -05002399 size_t pg_tbl_size = rmem->nr_pages * 8;
2400
2401 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2402 pg_tbl_size = rmem->page_size;
2403 dma_free_coherent(&pdev->dev, pg_tbl_size,
Michael Chan6fe19882018-10-14 07:02:41 -04002404 rmem->pg_tbl, rmem->pg_tbl_map);
2405 rmem->pg_tbl = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04002406 }
Michael Chan6fe19882018-10-14 07:02:41 -04002407 if (rmem->vmem_size && *rmem->vmem) {
2408 vfree(*rmem->vmem);
2409 *rmem->vmem = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04002410 }
2411}
2412
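/* Editorial note: a bnxt_ring_mem_info describes one hardware ring as
 * rmem->nr_pages DMA-coherent pages of rmem->page_size bytes.  When more
 * than one page (or an explicit depth) is needed, bnxt_alloc_ring() also
 * builds a page table (rmem->pg_tbl) whose entries carry the PTU_PTE_*
 * bits so the chip can chain the pages; rmem->vmem is an optional
 * vzalloc()'ed shadow area for the driver's per-entry software state.
 */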
Michael Chan6fe19882018-10-14 07:02:41 -04002413static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
Michael Chanc0c050c2015-10-22 16:01:17 -04002414{
Michael Chanc0c050c2015-10-22 16:01:17 -04002415 struct pci_dev *pdev = bp->pdev;
Michael Chan66cca202018-10-14 07:02:42 -04002416 u64 valid_bit = 0;
Michael Chan6fe19882018-10-14 07:02:41 -04002417 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04002418
Michael Chan66cca202018-10-14 07:02:42 -04002419 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2420 valid_bit = PTU_PTE_VALID;
Michael Chan4f49b2b2018-12-20 03:38:49 -05002421 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2422 size_t pg_tbl_size = rmem->nr_pages * 8;
2423
2424 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2425 pg_tbl_size = rmem->page_size;
2426 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
Michael Chan6fe19882018-10-14 07:02:41 -04002427 &rmem->pg_tbl_map,
Michael Chanc0c050c2015-10-22 16:01:17 -04002428 GFP_KERNEL);
Michael Chan6fe19882018-10-14 07:02:41 -04002429 if (!rmem->pg_tbl)
Michael Chanc0c050c2015-10-22 16:01:17 -04002430 return -ENOMEM;
2431 }
2432
Michael Chan6fe19882018-10-14 07:02:41 -04002433 for (i = 0; i < rmem->nr_pages; i++) {
Michael Chan66cca202018-10-14 07:02:42 -04002434 u64 extra_bits = valid_bit;
2435
Michael Chan6fe19882018-10-14 07:02:41 -04002436 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2437 rmem->page_size,
2438 &rmem->dma_arr[i],
Michael Chanc0c050c2015-10-22 16:01:17 -04002439 GFP_KERNEL);
Michael Chan6fe19882018-10-14 07:02:41 -04002440 if (!rmem->pg_arr[i])
Michael Chanc0c050c2015-10-22 16:01:17 -04002441 return -ENOMEM;
2442
Michael Chan4f49b2b2018-12-20 03:38:49 -05002443 if (rmem->nr_pages > 1 || rmem->depth > 0) {
Michael Chan66cca202018-10-14 07:02:42 -04002444 if (i == rmem->nr_pages - 2 &&
2445 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2446 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2447 else if (i == rmem->nr_pages - 1 &&
2448 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2449 extra_bits |= PTU_PTE_LAST;
2450 rmem->pg_tbl[i] =
2451 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2452 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002453 }
2454
Michael Chan6fe19882018-10-14 07:02:41 -04002455 if (rmem->vmem_size) {
2456 *rmem->vmem = vzalloc(rmem->vmem_size);
2457 if (!(*rmem->vmem))
Michael Chanc0c050c2015-10-22 16:01:17 -04002458 return -ENOMEM;
2459 }
2460 return 0;
2461}
2462
2463static void bnxt_free_rx_rings(struct bnxt *bp)
2464{
2465 int i;
2466
Michael Chanb6ab4b02016-01-02 23:44:59 -05002467 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002468 return;
2469
2470 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002471 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002472 struct bnxt_ring_struct *ring;
2473
Michael Chanc6d30e82017-02-06 16:55:42 -05002474 if (rxr->xdp_prog)
2475 bpf_prog_put(rxr->xdp_prog);
2476
Jesper Dangaard Brouer96a86042018-01-03 11:25:44 +01002477 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2478 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2479
Michael Chanc0c050c2015-10-22 16:01:17 -04002480 kfree(rxr->rx_tpa);
2481 rxr->rx_tpa = NULL;
2482
2483 kfree(rxr->rx_agg_bmap);
2484 rxr->rx_agg_bmap = NULL;
2485
2486 ring = &rxr->rx_ring_struct;
Michael Chan6fe19882018-10-14 07:02:41 -04002487 bnxt_free_ring(bp, &ring->ring_mem);
Michael Chanc0c050c2015-10-22 16:01:17 -04002488
2489 ring = &rxr->rx_agg_ring_struct;
Michael Chan6fe19882018-10-14 07:02:41 -04002490 bnxt_free_ring(bp, &ring->ring_mem);
Michael Chanc0c050c2015-10-22 16:01:17 -04002491 }
2492}
2493
2494static int bnxt_alloc_rx_rings(struct bnxt *bp)
2495{
2496 int i, rc, agg_rings = 0, tpa_rings = 0;
2497
Michael Chanb6ab4b02016-01-02 23:44:59 -05002498 if (!bp->rx_ring)
2499 return -ENOMEM;
2500
Michael Chanc0c050c2015-10-22 16:01:17 -04002501 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2502 agg_rings = 1;
2503
2504 if (bp->flags & BNXT_FLAG_TPA)
2505 tpa_rings = 1;
2506
2507 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002508 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002509 struct bnxt_ring_struct *ring;
2510
Michael Chanc0c050c2015-10-22 16:01:17 -04002511 ring = &rxr->rx_ring_struct;
2512
Jesper Dangaard Brouer96a86042018-01-03 11:25:44 +01002513 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2514 if (rc < 0)
2515 return rc;
2516
Michael Chan6fe19882018-10-14 07:02:41 -04002517 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
Michael Chanc0c050c2015-10-22 16:01:17 -04002518 if (rc)
2519 return rc;
2520
Michael Chan2c61d212018-10-14 07:02:50 -04002521 ring->grp_idx = i;
Michael Chanc0c050c2015-10-22 16:01:17 -04002522 if (agg_rings) {
2523 u16 mem_size;
2524
2525 ring = &rxr->rx_agg_ring_struct;
Michael Chan6fe19882018-10-14 07:02:41 -04002526 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
Michael Chanc0c050c2015-10-22 16:01:17 -04002527 if (rc)
2528 return rc;
2529
Michael Chan9899bb52018-03-31 13:54:16 -04002530 ring->grp_idx = i;
Michael Chanc0c050c2015-10-22 16:01:17 -04002531 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2532 mem_size = rxr->rx_agg_bmap_size / 8;
2533 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2534 if (!rxr->rx_agg_bmap)
2535 return -ENOMEM;
2536
2537 if (tpa_rings) {
2538 rxr->rx_tpa = kcalloc(MAX_TPA,
2539 sizeof(struct bnxt_tpa_info),
2540 GFP_KERNEL);
2541 if (!rxr->rx_tpa)
2542 return -ENOMEM;
2543 }
2544 }
2545 }
2546 return 0;
2547}
2548
2549static void bnxt_free_tx_rings(struct bnxt *bp)
2550{
2551 int i;
2552 struct pci_dev *pdev = bp->pdev;
2553
Michael Chanb6ab4b02016-01-02 23:44:59 -05002554 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002555 return;
2556
2557 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002558 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002559 struct bnxt_ring_struct *ring;
2560
Michael Chanc0c050c2015-10-22 16:01:17 -04002561 if (txr->tx_push) {
2562 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2563 txr->tx_push, txr->tx_push_mapping);
2564 txr->tx_push = NULL;
2565 }
2566
2567 ring = &txr->tx_ring_struct;
2568
Michael Chan6fe19882018-10-14 07:02:41 -04002569 bnxt_free_ring(bp, &ring->ring_mem);
Michael Chanc0c050c2015-10-22 16:01:17 -04002570 }
2571}
2572
2573static int bnxt_alloc_tx_rings(struct bnxt *bp)
2574{
2575 int i, j, rc;
2576 struct pci_dev *pdev = bp->pdev;
2577
2578 bp->tx_push_size = 0;
2579 if (bp->tx_push_thresh) {
2580 int push_size;
2581
2582 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2583 bp->tx_push_thresh);
2584
Michael Chan4419dbe2016-02-10 17:33:49 -05002585 if (push_size > 256) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002586 push_size = 0;
2587 bp->tx_push_thresh = 0;
2588 }
2589
2590 bp->tx_push_size = push_size;
2591 }
2592
2593 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002594 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002595 struct bnxt_ring_struct *ring;
Michael Chan2e8ef772018-04-26 17:44:31 -04002596 u8 qidx;
Michael Chanc0c050c2015-10-22 16:01:17 -04002597
Michael Chanc0c050c2015-10-22 16:01:17 -04002598 ring = &txr->tx_ring_struct;
2599
Michael Chan6fe19882018-10-14 07:02:41 -04002600 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
Michael Chanc0c050c2015-10-22 16:01:17 -04002601 if (rc)
2602 return rc;
2603
Michael Chan9899bb52018-03-31 13:54:16 -04002604 ring->grp_idx = txr->bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04002605 if (bp->tx_push_size) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002606 dma_addr_t mapping;
2607
2608			/* One pre-allocated DMA buffer to back up
2609			 * the TX push operation
2610 */
2611 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2612 bp->tx_push_size,
2613 &txr->tx_push_mapping,
2614 GFP_KERNEL);
2615
2616 if (!txr->tx_push)
2617 return -ENOMEM;
2618
Michael Chanc0c050c2015-10-22 16:01:17 -04002619 mapping = txr->tx_push_mapping +
2620 sizeof(struct tx_push_bd);
Michael Chan4419dbe2016-02-10 17:33:49 -05002621 txr->data_mapping = cpu_to_le64(mapping);
Michael Chanc0c050c2015-10-22 16:01:17 -04002622
Michael Chan4419dbe2016-02-10 17:33:49 -05002623 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
Michael Chanc0c050c2015-10-22 16:01:17 -04002624 }
Michael Chan2e8ef772018-04-26 17:44:31 -04002625 qidx = bp->tc_to_qidx[j];
2626 ring->queue_id = bp->q_info[qidx].queue_id;
Michael Chan5f449242017-02-06 16:55:40 -05002627 if (i < bp->tx_nr_rings_xdp)
2628 continue;
Michael Chanc0c050c2015-10-22 16:01:17 -04002629 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2630 j++;
2631 }
2632 return 0;
2633}
2634
2635static void bnxt_free_cp_rings(struct bnxt *bp)
2636{
2637 int i;
2638
2639 if (!bp->bnapi)
2640 return;
2641
2642 for (i = 0; i < bp->cp_nr_rings; i++) {
2643 struct bnxt_napi *bnapi = bp->bnapi[i];
2644 struct bnxt_cp_ring_info *cpr;
2645 struct bnxt_ring_struct *ring;
Michael Chan50e3ab72018-10-14 07:02:49 -04002646 int j;
Michael Chanc0c050c2015-10-22 16:01:17 -04002647
2648 if (!bnapi)
2649 continue;
2650
2651 cpr = &bnapi->cp_ring;
2652 ring = &cpr->cp_ring_struct;
2653
Michael Chan6fe19882018-10-14 07:02:41 -04002654 bnxt_free_ring(bp, &ring->ring_mem);
Michael Chan50e3ab72018-10-14 07:02:49 -04002655
2656 for (j = 0; j < 2; j++) {
2657 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2658
2659 if (cpr2) {
2660 ring = &cpr2->cp_ring_struct;
2661 bnxt_free_ring(bp, &ring->ring_mem);
2662 kfree(cpr2);
2663 cpr->cp_ring_arr[j] = NULL;
2664 }
2665 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002666 }
2667}
2668
Michael Chan50e3ab72018-10-14 07:02:49 -04002669static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2670{
2671 struct bnxt_ring_mem_info *rmem;
2672 struct bnxt_ring_struct *ring;
2673 struct bnxt_cp_ring_info *cpr;
2674 int rc;
2675
2676 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2677 if (!cpr)
2678 return NULL;
2679
2680 ring = &cpr->cp_ring_struct;
2681 rmem = &ring->ring_mem;
2682 rmem->nr_pages = bp->cp_nr_pages;
2683 rmem->page_size = HW_CMPD_RING_SIZE;
2684 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2685 rmem->dma_arr = cpr->cp_desc_mapping;
2686 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2687 rc = bnxt_alloc_ring(bp, rmem);
2688 if (rc) {
2689 bnxt_free_ring(bp, rmem);
2690 kfree(cpr);
2691 cpr = NULL;
2692 }
2693 return cpr;
2694}
2695
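/* Editorial note: with BNXT_FLAG_CHIP_P5 each bnxt_napi owns a
 * notification queue (the cp_ring below) plus up to two sub completion
 * rings allocated by bnxt_alloc_cp_sub_ring(): BNXT_RX_HDL for the RX ring
 * and BNXT_TX_HDL for the TX ring, depending on whether rings are shared.
 * The MSI-X map index is also offset past any vectors reserved for the
 * ULP (RDMA) driver.
 */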
Michael Chanc0c050c2015-10-22 16:01:17 -04002696static int bnxt_alloc_cp_rings(struct bnxt *bp)
2697{
Michael Chan50e3ab72018-10-14 07:02:49 -04002698 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
Michael Chane5811b82018-03-31 13:54:18 -04002699 int i, rc, ulp_base_vec, ulp_msix;
Michael Chanc0c050c2015-10-22 16:01:17 -04002700
Michael Chane5811b82018-03-31 13:54:18 -04002701 ulp_msix = bnxt_get_ulp_msix_num(bp);
2702 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04002703 for (i = 0; i < bp->cp_nr_rings; i++) {
2704 struct bnxt_napi *bnapi = bp->bnapi[i];
2705 struct bnxt_cp_ring_info *cpr;
2706 struct bnxt_ring_struct *ring;
2707
2708 if (!bnapi)
2709 continue;
2710
2711 cpr = &bnapi->cp_ring;
Michael Chan50e3ab72018-10-14 07:02:49 -04002712 cpr->bnapi = bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04002713 ring = &cpr->cp_ring_struct;
2714
Michael Chan6fe19882018-10-14 07:02:41 -04002715 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
Michael Chanc0c050c2015-10-22 16:01:17 -04002716 if (rc)
2717 return rc;
Michael Chane5811b82018-03-31 13:54:18 -04002718
2719 if (ulp_msix && i >= ulp_base_vec)
2720 ring->map_idx = i + ulp_msix;
2721 else
2722 ring->map_idx = i;
Michael Chan50e3ab72018-10-14 07:02:49 -04002723
2724 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2725 continue;
2726
2727 if (i < bp->rx_nr_rings) {
2728 struct bnxt_cp_ring_info *cpr2 =
2729 bnxt_alloc_cp_sub_ring(bp);
2730
2731 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2732 if (!cpr2)
2733 return -ENOMEM;
2734 cpr2->bnapi = bnapi;
2735 }
2736 if ((sh && i < bp->tx_nr_rings) ||
2737 (!sh && i >= bp->rx_nr_rings)) {
2738 struct bnxt_cp_ring_info *cpr2 =
2739 bnxt_alloc_cp_sub_ring(bp);
2740
2741 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2742 if (!cpr2)
2743 return -ENOMEM;
2744 cpr2->bnapi = bnapi;
2745 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002746 }
2747 return 0;
2748}
2749
2750static void bnxt_init_ring_struct(struct bnxt *bp)
2751{
2752 int i;
2753
2754 for (i = 0; i < bp->cp_nr_rings; i++) {
2755 struct bnxt_napi *bnapi = bp->bnapi[i];
Michael Chan6fe19882018-10-14 07:02:41 -04002756 struct bnxt_ring_mem_info *rmem;
Michael Chanc0c050c2015-10-22 16:01:17 -04002757 struct bnxt_cp_ring_info *cpr;
2758 struct bnxt_rx_ring_info *rxr;
2759 struct bnxt_tx_ring_info *txr;
2760 struct bnxt_ring_struct *ring;
2761
2762 if (!bnapi)
2763 continue;
2764
2765 cpr = &bnapi->cp_ring;
2766 ring = &cpr->cp_ring_struct;
Michael Chan6fe19882018-10-14 07:02:41 -04002767 rmem = &ring->ring_mem;
2768 rmem->nr_pages = bp->cp_nr_pages;
2769 rmem->page_size = HW_CMPD_RING_SIZE;
2770 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2771 rmem->dma_arr = cpr->cp_desc_mapping;
2772 rmem->vmem_size = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04002773
Michael Chanb6ab4b02016-01-02 23:44:59 -05002774 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002775 if (!rxr)
2776 goto skip_rx;
2777
Michael Chanc0c050c2015-10-22 16:01:17 -04002778 ring = &rxr->rx_ring_struct;
Michael Chan6fe19882018-10-14 07:02:41 -04002779 rmem = &ring->ring_mem;
2780 rmem->nr_pages = bp->rx_nr_pages;
2781 rmem->page_size = HW_RXBD_RING_SIZE;
2782 rmem->pg_arr = (void **)rxr->rx_desc_ring;
2783 rmem->dma_arr = rxr->rx_desc_mapping;
2784 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2785 rmem->vmem = (void **)&rxr->rx_buf_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04002786
2787 ring = &rxr->rx_agg_ring_struct;
Michael Chan6fe19882018-10-14 07:02:41 -04002788 rmem = &ring->ring_mem;
2789 rmem->nr_pages = bp->rx_agg_nr_pages;
2790 rmem->page_size = HW_RXBD_RING_SIZE;
2791 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
2792 rmem->dma_arr = rxr->rx_agg_desc_mapping;
2793 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2794 rmem->vmem = (void **)&rxr->rx_agg_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04002795
Michael Chan3b2b7d92016-01-02 23:45:00 -05002796skip_rx:
Michael Chanb6ab4b02016-01-02 23:44:59 -05002797 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002798 if (!txr)
2799 continue;
2800
Michael Chanc0c050c2015-10-22 16:01:17 -04002801 ring = &txr->tx_ring_struct;
Michael Chan6fe19882018-10-14 07:02:41 -04002802 rmem = &ring->ring_mem;
2803 rmem->nr_pages = bp->tx_nr_pages;
2804 rmem->page_size = HW_RXBD_RING_SIZE;
2805 rmem->pg_arr = (void **)txr->tx_desc_ring;
2806 rmem->dma_arr = txr->tx_desc_mapping;
2807 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2808 rmem->vmem = (void **)&txr->tx_buf_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04002809 }
2810}
2811
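/* Editorial note: rx_bd_opaque is set to the producer index below and is
 * echoed back by the hardware in rx_cmp_opaque, which is how bnxt_rx_pkt()
 * recovers the software buffer index (cons) for a completion without
 * walking the ring.
 */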
2812static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2813{
2814 int i;
2815 u32 prod;
2816 struct rx_bd **rx_buf_ring;
2817
Michael Chan6fe19882018-10-14 07:02:41 -04002818 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
2819 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002820 int j;
2821 struct rx_bd *rxbd;
2822
2823 rxbd = rx_buf_ring[i];
2824 if (!rxbd)
2825 continue;
2826
2827 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2828 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2829 rxbd->rx_bd_opaque = prod;
2830 }
2831 }
2832}
2833
2834static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2835{
2836 struct net_device *dev = bp->dev;
Michael Chanc0c050c2015-10-22 16:01:17 -04002837 struct bnxt_rx_ring_info *rxr;
2838 struct bnxt_ring_struct *ring;
2839 u32 prod, type;
2840 int i;
2841
Michael Chanc0c050c2015-10-22 16:01:17 -04002842 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2843 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2844
2845 if (NET_IP_ALIGN == 2)
2846 type |= RX_BD_FLAGS_SOP;
2847
Michael Chanb6ab4b02016-01-02 23:44:59 -05002848 rxr = &bp->rx_ring[ring_nr];
Michael Chanc0c050c2015-10-22 16:01:17 -04002849 ring = &rxr->rx_ring_struct;
2850 bnxt_init_rxbd_pages(ring, type);
2851
Michael Chanc6d30e82017-02-06 16:55:42 -05002852 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2853 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2854 if (IS_ERR(rxr->xdp_prog)) {
2855 int rc = PTR_ERR(rxr->xdp_prog);
2856
2857 rxr->xdp_prog = NULL;
2858 return rc;
2859 }
2860 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002861 prod = rxr->rx_prod;
2862 for (i = 0; i < bp->rx_ring_size; i++) {
2863 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2864 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2865 ring_nr, i, bp->rx_ring_size);
2866 break;
2867 }
2868 prod = NEXT_RX(prod);
2869 }
2870 rxr->rx_prod = prod;
2871 ring->fw_ring_id = INVALID_HW_RING_ID;
2872
Michael Chanedd0c2c2015-12-27 18:19:19 -05002873 ring = &rxr->rx_agg_ring_struct;
2874 ring->fw_ring_id = INVALID_HW_RING_ID;
2875
Michael Chanc0c050c2015-10-22 16:01:17 -04002876 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2877 return 0;
2878
Michael Chan2839f282016-04-25 02:30:50 -04002879 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
Michael Chanc0c050c2015-10-22 16:01:17 -04002880 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2881
2882 bnxt_init_rxbd_pages(ring, type);
2883
2884 prod = rxr->rx_agg_prod;
2885 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2886 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2887 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2888				    ring_nr, i, bp->rx_agg_ring_size);
2889 break;
2890 }
2891 prod = NEXT_RX_AGG(prod);
2892 }
2893 rxr->rx_agg_prod = prod;
Michael Chanc0c050c2015-10-22 16:01:17 -04002894
2895 if (bp->flags & BNXT_FLAG_TPA) {
2896 if (rxr->rx_tpa) {
2897 u8 *data;
2898 dma_addr_t mapping;
2899
2900 for (i = 0; i < MAX_TPA; i++) {
2901 data = __bnxt_alloc_rx_data(bp, &mapping,
2902 GFP_KERNEL);
2903 if (!data)
2904 return -ENOMEM;
2905
2906 rxr->rx_tpa[i].data = data;
Michael Chanb3dba772017-02-06 16:55:35 -05002907 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
Michael Chanc0c050c2015-10-22 16:01:17 -04002908 rxr->rx_tpa[i].mapping = mapping;
2909 }
2910 } else {
2911 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2912 return -ENOMEM;
2913 }
2914 }
2915
2916 return 0;
2917}
2918
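/* Reset the firmware ring id of every completion ring, including any
 * RX/TX sub-rings hanging off cp_ring_arr[], and seed each ring's
 * coalescing parameters from the global bp->rx_coal settings.
 */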
Sankar Patchineelam22479252017-03-28 19:47:29 -04002919static void bnxt_init_cp_rings(struct bnxt *bp)
2920{
Michael Chan3e08b182018-10-14 07:02:52 -04002921 int i, j;
Sankar Patchineelam22479252017-03-28 19:47:29 -04002922
2923 for (i = 0; i < bp->cp_nr_rings; i++) {
2924 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2925 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2926
2927 ring->fw_ring_id = INVALID_HW_RING_ID;
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05002928 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2929 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
Michael Chan3e08b182018-10-14 07:02:52 -04002930 for (j = 0; j < 2; j++) {
2931 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2932
2933 if (!cpr2)
2934 continue;
2935
2936 ring = &cpr2->cp_ring_struct;
2937 ring->fw_ring_id = INVALID_HW_RING_ID;
2938 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2939 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2940 }
Sankar Patchineelam22479252017-03-28 19:47:29 -04002941 }
2942}
2943
Michael Chanc0c050c2015-10-22 16:01:17 -04002944static int bnxt_init_rx_rings(struct bnxt *bp)
2945{
2946 int i, rc = 0;
2947
Michael Chanc61fb992017-02-06 16:55:36 -05002948 if (BNXT_RX_PAGE_MODE(bp)) {
Michael Chanc6d30e82017-02-06 16:55:42 -05002949 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2950 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
Michael Chanc61fb992017-02-06 16:55:36 -05002951 } else {
2952 bp->rx_offset = BNXT_RX_OFFSET;
2953 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2954 }
Michael Chanb3dba772017-02-06 16:55:35 -05002955
Michael Chanc0c050c2015-10-22 16:01:17 -04002956 for (i = 0; i < bp->rx_nr_rings; i++) {
2957 rc = bnxt_init_one_rx_ring(bp, i);
2958 if (rc)
2959 break;
2960 }
2961
2962 return rc;
2963}
2964
2965static int bnxt_init_tx_rings(struct bnxt *bp)
2966{
2967 u16 i;
2968
2969 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2970 MAX_SKB_FRAGS + 1);
2971
2972 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002973 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002974 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2975
2976 ring->fw_ring_id = INVALID_HW_RING_ID;
2977 }
2978
2979 return 0;
2980}
2981
2982static void bnxt_free_ring_grps(struct bnxt *bp)
2983{
2984 kfree(bp->grp_info);
2985 bp->grp_info = NULL;
2986}
2987
2988static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2989{
2990 int i;
2991
2992 if (irq_re_init) {
2993 bp->grp_info = kcalloc(bp->cp_nr_rings,
2994 sizeof(struct bnxt_ring_grp_info),
2995 GFP_KERNEL);
2996 if (!bp->grp_info)
2997 return -ENOMEM;
2998 }
2999 for (i = 0; i < bp->cp_nr_rings; i++) {
3000 if (irq_re_init)
3001 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3002 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3003 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3004 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3005 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3006 }
3007 return 0;
3008}
3009
3010static void bnxt_free_vnics(struct bnxt *bp)
3011{
3012 kfree(bp->vnic_info);
3013 bp->vnic_info = NULL;
3014 bp->nr_vnics = 0;
3015}
3016
3017static int bnxt_alloc_vnics(struct bnxt *bp)
3018{
3019 int num_vnics = 1;
3020
3021#ifdef CONFIG_RFS_ACCEL
3022 if (bp->flags & BNXT_FLAG_RFS)
3023 num_vnics += bp->rx_nr_rings;
3024#endif
3025
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04003026 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3027 num_vnics++;
3028
Michael Chanc0c050c2015-10-22 16:01:17 -04003029 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3030 GFP_KERNEL);
3031 if (!bp->vnic_info)
3032 return -ENOMEM;
3033
3034 bp->nr_vnics = num_vnics;
3035 return 0;
3036}
3037
3038static void bnxt_init_vnics(struct bnxt *bp)
3039{
3040 int i;
3041
3042 for (i = 0; i < bp->nr_vnics; i++) {
3043 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
Michael Chan44c6f722018-10-14 07:02:53 -04003044 int j;
Michael Chanc0c050c2015-10-22 16:01:17 -04003045
3046 vnic->fw_vnic_id = INVALID_HW_RING_ID;
Michael Chan44c6f722018-10-14 07:02:53 -04003047 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3048 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3049
Michael Chanc0c050c2015-10-22 16:01:17 -04003050 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3051
3052 if (bp->vnic_info[i].rss_hash_key) {
3053 if (i == 0)
3054 prandom_bytes(vnic->rss_hash_key,
3055 HW_HASH_KEY_SIZE);
3056 else
3057 memcpy(vnic->rss_hash_key,
3058 bp->vnic_info[0].rss_hash_key,
3059 HW_HASH_KEY_SIZE);
3060 }
3061 }
3062}
3063
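/* Work out how many descriptor pages a ring needs and round the result
 * up to a power of two.  A worked example with hypothetical sizes:
 * ring_size = 200, desc_per_pg = 64 gives 200 / 64 = 3, plus one = 4,
 * which is already a power of two, so 4 pages are used.  Note that an
 * exact multiple also gets bumped: ring_size = 256, desc_per_pg = 64
 * gives 4 + 1 = 5, rounded up to 8.
 */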
3064static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3065{
3066 int pages;
3067
3068 pages = ring_size / desc_per_pg;
3069
3070 if (!pages)
3071 return 1;
3072
3073 pages++;
3074
3075 while (pages & (pages - 1))
3076 pages++;
3077
3078 return pages;
3079}
3080
Michael Chanc6d30e82017-02-06 16:55:42 -05003081void bnxt_set_tpa_flags(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04003082{
3083 bp->flags &= ~BNXT_FLAG_TPA;
Michael Chan341138c2017-01-13 01:32:01 -05003084 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3085 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04003086 if (bp->dev->features & NETIF_F_LRO)
3087 bp->flags |= BNXT_FLAG_LRO;
Michael Chan1054aee2017-12-16 03:09:42 -05003088 else if (bp->dev->features & NETIF_F_GRO_HW)
Michael Chanc0c050c2015-10-22 16:01:17 -04003089 bp->flags |= BNXT_FLAG_GRO;
3090}
3091
3092/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3093 * be set on entry.
3094 */
3095void bnxt_set_ring_params(struct bnxt *bp)
3096{
3097 u32 ring_size, rx_size, rx_space;
3098 u32 agg_factor = 0, agg_ring_size = 0;
3099
3100 /* 8 for CRC and VLAN */
3101 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3102
3103 rx_space = rx_size + NET_SKB_PAD +
3104 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3105
3106 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3107 ring_size = bp->rx_ring_size;
3108 bp->rx_agg_ring_size = 0;
3109 bp->rx_agg_nr_pages = 0;
3110
3111 if (bp->flags & BNXT_FLAG_TPA)
Michael Chan2839f282016-04-25 02:30:50 -04003112 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
Michael Chanc0c050c2015-10-22 16:01:17 -04003113
3114 bp->flags &= ~BNXT_FLAG_JUMBO;
Michael Chanbdbd1eb2016-12-29 12:13:43 -05003115 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003116 u32 jumbo_factor;
3117
3118 bp->flags |= BNXT_FLAG_JUMBO;
3119 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3120 if (jumbo_factor > agg_factor)
3121 agg_factor = jumbo_factor;
3122 }
3123 agg_ring_size = ring_size * agg_factor;
3124
3125 if (agg_ring_size) {
3126 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3127 RX_DESC_CNT);
3128 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3129 u32 tmp = agg_ring_size;
3130
3131 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3132 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3133 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3134 tmp, agg_ring_size);
3135 }
3136 bp->rx_agg_ring_size = agg_ring_size;
3137 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3138 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3139 rx_space = rx_size + NET_SKB_PAD +
3140 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3141 }
3142
3143 bp->rx_buf_use_size = rx_size;
3144 bp->rx_buf_size = rx_space;
3145
3146 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3147 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3148
3149 ring_size = bp->tx_ring_size;
3150 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3151 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3152
3153 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3154 bp->cp_ring_size = ring_size;
3155
3156 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3157 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3158 bp->cp_nr_pages = MAX_CP_PAGES;
3159 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3160 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3161 ring_size, bp->cp_ring_size);
3162 }
3163 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3164 bp->cp_ring_mask = bp->cp_bit - 1;
3165}
3166
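/* bnxt_set_rx_skb_mode() switches between the normal SKB allocation
 * scheme and the page-per-packet scheme used for XDP: in page mode the
 * MTU is capped at BNXT_MAX_PAGE_MODE_MTU, aggregation rings are
 * disabled, RX buffers are mapped DMA_BIDIRECTIONAL so packet data can
 * be rewritten in place, and rx_skb_func is pointed at the page-based
 * handler.
 */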
Jesper Dangaard Brouer96a86042018-01-03 11:25:44 +01003167/* Changing allocation mode of RX rings.
3168 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3169 */
Michael Chanc61fb992017-02-06 16:55:36 -05003170int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
Michael Chan6bb19472017-02-06 16:55:32 -05003171{
Michael Chanc61fb992017-02-06 16:55:36 -05003172 if (page_mode) {
3173 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3174 return -EOPNOTSUPP;
Michael Chan7eb9bb32017-10-26 11:51:25 -04003175 bp->dev->max_mtu =
3176 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
Michael Chanc61fb992017-02-06 16:55:36 -05003177 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3178 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
Michael Chanc61fb992017-02-06 16:55:36 -05003179 bp->rx_dir = DMA_BIDIRECTIONAL;
3180 bp->rx_skb_func = bnxt_rx_page_skb;
Michael Chan1054aee2017-12-16 03:09:42 -05003181 /* Disable LRO or GRO_HW */
3182 netdev_update_features(bp->dev);
Michael Chanc61fb992017-02-06 16:55:36 -05003183 } else {
Michael Chan7eb9bb32017-10-26 11:51:25 -04003184 bp->dev->max_mtu = bp->max_mtu;
Michael Chanc61fb992017-02-06 16:55:36 -05003185 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3186 bp->rx_dir = DMA_FROM_DEVICE;
3187 bp->rx_skb_func = bnxt_rx_skb;
3188 }
Michael Chan6bb19472017-02-06 16:55:32 -05003189 return 0;
3190}
3191
Michael Chanc0c050c2015-10-22 16:01:17 -04003192static void bnxt_free_vnic_attributes(struct bnxt *bp)
3193{
3194 int i;
3195 struct bnxt_vnic_info *vnic;
3196 struct pci_dev *pdev = bp->pdev;
3197
3198 if (!bp->vnic_info)
3199 return;
3200
3201 for (i = 0; i < bp->nr_vnics; i++) {
3202 vnic = &bp->vnic_info[i];
3203
3204 kfree(vnic->fw_grp_ids);
3205 vnic->fw_grp_ids = NULL;
3206
3207 kfree(vnic->uc_list);
3208 vnic->uc_list = NULL;
3209
3210 if (vnic->mc_list) {
3211 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3212 vnic->mc_list, vnic->mc_list_mapping);
3213 vnic->mc_list = NULL;
3214 }
3215
3216 if (vnic->rss_table) {
3217 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3218 vnic->rss_table,
3219 vnic->rss_table_dma_addr);
3220 vnic->rss_table = NULL;
3221 }
3222
3223 vnic->rss_hash_key = NULL;
3224 vnic->flags = 0;
3225 }
3226}
3227
3228static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3229{
3230 int i, rc = 0, size;
3231 struct bnxt_vnic_info *vnic;
3232 struct pci_dev *pdev = bp->pdev;
3233 int max_rings;
3234
3235 for (i = 0; i < bp->nr_vnics; i++) {
3236 vnic = &bp->vnic_info[i];
3237
3238 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3239 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3240
3241 if (mem_size > 0) {
3242 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3243 if (!vnic->uc_list) {
3244 rc = -ENOMEM;
3245 goto out;
3246 }
3247 }
3248 }
3249
3250 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3251 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3252 vnic->mc_list =
3253 dma_alloc_coherent(&pdev->dev,
3254 vnic->mc_list_size,
3255 &vnic->mc_list_mapping,
3256 GFP_KERNEL);
3257 if (!vnic->mc_list) {
3258 rc = -ENOMEM;
3259 goto out;
3260 }
3261 }
3262
Michael Chan44c6f722018-10-14 07:02:53 -04003263 if (bp->flags & BNXT_FLAG_CHIP_P5)
3264 goto vnic_skip_grps;
3265
Michael Chanc0c050c2015-10-22 16:01:17 -04003266 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3267 max_rings = bp->rx_nr_rings;
3268 else
3269 max_rings = 1;
3270
3271 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3272 if (!vnic->fw_grp_ids) {
3273 rc = -ENOMEM;
3274 goto out;
3275 }
Michael Chan44c6f722018-10-14 07:02:53 -04003276vnic_skip_grps:
Michael Chanae10ae72016-12-29 12:13:38 -05003277 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3278 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3279 continue;
3280
Michael Chanc0c050c2015-10-22 16:01:17 -04003281 /* Allocate rss table and hash key */
3282 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3283 &vnic->rss_table_dma_addr,
3284 GFP_KERNEL);
3285 if (!vnic->rss_table) {
3286 rc = -ENOMEM;
3287 goto out;
3288 }
3289
3290 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3291
3292 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3293 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3294 }
3295 return 0;
3296
3297out:
3298 return rc;
3299}
3300
3301static void bnxt_free_hwrm_resources(struct bnxt *bp)
3302{
3303 struct pci_dev *pdev = bp->pdev;
3304
Venkat Duvvurua2bf74f2018-10-05 00:26:02 -04003305 if (bp->hwrm_cmd_resp_addr) {
3306 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3307 bp->hwrm_cmd_resp_dma_addr);
3308 bp->hwrm_cmd_resp_addr = NULL;
3309 }
Venkat Duvvuru760b6d32018-12-20 03:38:48 -05003310
3311 if (bp->hwrm_cmd_kong_resp_addr) {
3312 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3313 bp->hwrm_cmd_kong_resp_addr,
3314 bp->hwrm_cmd_kong_resp_dma_addr);
3315 bp->hwrm_cmd_kong_resp_addr = NULL;
3316 }
3317}
3318
3319static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3320{
3321 struct pci_dev *pdev = bp->pdev;
3322
3323 bp->hwrm_cmd_kong_resp_addr =
3324 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3325 &bp->hwrm_cmd_kong_resp_dma_addr,
3326 GFP_KERNEL);
3327 if (!bp->hwrm_cmd_kong_resp_addr)
3328 return -ENOMEM;
3329
3330 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04003331}
3332
3333static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3334{
3335 struct pci_dev *pdev = bp->pdev;
3336
3337 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3338 &bp->hwrm_cmd_resp_dma_addr,
3339 GFP_KERNEL);
3340 if (!bp->hwrm_cmd_resp_addr)
3341 return -ENOMEM;
Michael Chanc0c050c2015-10-22 16:01:17 -04003342
3343 return 0;
3344}
3345
Deepak Khungare605db82017-05-29 19:06:04 -04003346static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3347{
3348 if (bp->hwrm_short_cmd_req_addr) {
3349 struct pci_dev *pdev = bp->pdev;
3350
Michael Chan1dfddc42018-10-14 07:02:39 -04003351 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
Deepak Khungare605db82017-05-29 19:06:04 -04003352 bp->hwrm_short_cmd_req_addr,
3353 bp->hwrm_short_cmd_req_dma_addr);
3354 bp->hwrm_short_cmd_req_addr = NULL;
3355 }
3356}
3357
3358static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3359{
3360 struct pci_dev *pdev = bp->pdev;
3361
3362 bp->hwrm_short_cmd_req_addr =
Michael Chan1dfddc42018-10-14 07:02:39 -04003363 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
Deepak Khungare605db82017-05-29 19:06:04 -04003364 &bp->hwrm_short_cmd_req_dma_addr,
3365 GFP_KERNEL);
3366 if (!bp->hwrm_short_cmd_req_addr)
3367 return -ENOMEM;
3368
3369 return 0;
3370}
3371
Michael Chanfd3ab1c2018-12-16 18:46:30 -05003372static void bnxt_free_port_stats(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04003373{
Michael Chanc0c050c2015-10-22 16:01:17 -04003374 struct pci_dev *pdev = bp->pdev;
3375
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003376 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3377 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3378
Michael Chan3bdf56c2016-03-07 15:38:45 -05003379 if (bp->hw_rx_port_stats) {
3380 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3381 bp->hw_rx_port_stats,
3382 bp->hw_rx_port_stats_map);
3383 bp->hw_rx_port_stats = NULL;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003384 }
3385
Michael Chan36e53342018-10-14 07:02:38 -04003386 if (bp->hw_tx_port_stats_ext) {
3387 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3388 bp->hw_tx_port_stats_ext,
3389 bp->hw_tx_port_stats_ext_map);
3390 bp->hw_tx_port_stats_ext = NULL;
3391 }
3392
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003393 if (bp->hw_rx_port_stats_ext) {
3394 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3395 bp->hw_rx_port_stats_ext,
3396 bp->hw_rx_port_stats_ext_map);
3397 bp->hw_rx_port_stats_ext = NULL;
Michael Chan3bdf56c2016-03-07 15:38:45 -05003398 }
Michael Chanfd3ab1c2018-12-16 18:46:30 -05003399}
3400
3401static void bnxt_free_ring_stats(struct bnxt *bp)
3402{
3403 struct pci_dev *pdev = bp->pdev;
3404 int size, i;
Michael Chan3bdf56c2016-03-07 15:38:45 -05003405
Michael Chanc0c050c2015-10-22 16:01:17 -04003406 if (!bp->bnapi)
3407 return;
3408
3409 size = sizeof(struct ctx_hw_stats);
3410
3411 for (i = 0; i < bp->cp_nr_rings; i++) {
3412 struct bnxt_napi *bnapi = bp->bnapi[i];
3413 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3414
3415 if (cpr->hw_stats) {
3416 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3417 cpr->hw_stats_map);
3418 cpr->hw_stats = NULL;
3419 }
3420 }
3421}
3422
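/* Allocate the DMA-coherent statistics blocks: one ctx_hw_stats block
 * per completion ring and, on PFs (other than the 58700 chip), the port
 * statistics buffer plus the extended RX/TX port statistics when the
 * firmware spec is new enough.  The goto labels skip over blocks that
 * already exist, so the function can be called again without
 * reallocating them.
 */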
3423static int bnxt_alloc_stats(struct bnxt *bp)
3424{
3425 u32 size, i;
3426 struct pci_dev *pdev = bp->pdev;
3427
3428 size = sizeof(struct ctx_hw_stats);
3429
3430 for (i = 0; i < bp->cp_nr_rings; i++) {
3431 struct bnxt_napi *bnapi = bp->bnapi[i];
3432 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3433
3434 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3435 &cpr->hw_stats_map,
3436 GFP_KERNEL);
3437 if (!cpr->hw_stats)
3438 return -ENOMEM;
3439
3440 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3441 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05003442
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04003443 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
Michael Chanfd3ab1c2018-12-16 18:46:30 -05003444 if (bp->hw_rx_port_stats)
3445 goto alloc_ext_stats;
3446
Michael Chan3bdf56c2016-03-07 15:38:45 -05003447 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3448 sizeof(struct tx_port_stats) + 1024;
3449
3450 bp->hw_rx_port_stats =
3451 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3452 &bp->hw_rx_port_stats_map,
3453 GFP_KERNEL);
3454 if (!bp->hw_rx_port_stats)
3455 return -ENOMEM;
3456
3457 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3458 512;
3459 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3460 sizeof(struct rx_port_stats) + 512;
3461 bp->flags |= BNXT_FLAG_PORT_STATS;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003462
Michael Chanfd3ab1c2018-12-16 18:46:30 -05003463alloc_ext_stats:
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003464 /* Display extended statistics only if FW supports it */
3465 if (bp->hwrm_spec_code < 0x10804 ||
3466 bp->hwrm_spec_code == 0x10900)
3467 return 0;
3468
Michael Chanfd3ab1c2018-12-16 18:46:30 -05003469 if (bp->hw_rx_port_stats_ext)
3470 goto alloc_tx_ext_stats;
3471
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003472 bp->hw_rx_port_stats_ext =
Luis Chamberlain750afb02019-01-04 09:23:09 +01003473 dma_alloc_coherent(&pdev->dev,
3474 sizeof(struct rx_port_stats_ext),
3475 &bp->hw_rx_port_stats_ext_map,
3476 GFP_KERNEL);
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003477 if (!bp->hw_rx_port_stats_ext)
3478 return 0;
3479
Michael Chanfd3ab1c2018-12-16 18:46:30 -05003480alloc_tx_ext_stats:
3481 if (bp->hw_tx_port_stats_ext)
3482 return 0;
3483
Michael Chan36e53342018-10-14 07:02:38 -04003484 if (bp->hwrm_spec_code >= 0x10902) {
3485 bp->hw_tx_port_stats_ext =
Luis Chamberlain750afb02019-01-04 09:23:09 +01003486 dma_alloc_coherent(&pdev->dev,
3487 sizeof(struct tx_port_stats_ext),
3488 &bp->hw_tx_port_stats_ext_map,
3489 GFP_KERNEL);
Michael Chan36e53342018-10-14 07:02:38 -04003490 }
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003491 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
Michael Chan3bdf56c2016-03-07 15:38:45 -05003492 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003493 return 0;
3494}
3495
3496static void bnxt_clear_ring_indices(struct bnxt *bp)
3497{
3498 int i;
3499
3500 if (!bp->bnapi)
3501 return;
3502
3503 for (i = 0; i < bp->cp_nr_rings; i++) {
3504 struct bnxt_napi *bnapi = bp->bnapi[i];
3505 struct bnxt_cp_ring_info *cpr;
3506 struct bnxt_rx_ring_info *rxr;
3507 struct bnxt_tx_ring_info *txr;
3508
3509 if (!bnapi)
3510 continue;
3511
3512 cpr = &bnapi->cp_ring;
3513 cpr->cp_raw_cons = 0;
3514
Michael Chanb6ab4b02016-01-02 23:44:59 -05003515 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003516 if (txr) {
3517 txr->tx_prod = 0;
3518 txr->tx_cons = 0;
3519 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003520
Michael Chanb6ab4b02016-01-02 23:44:59 -05003521 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003522 if (rxr) {
3523 rxr->rx_prod = 0;
3524 rxr->rx_agg_prod = 0;
3525 rxr->rx_sw_agg_prod = 0;
Michael Chan376a5b82016-05-10 19:17:59 -04003526 rxr->rx_next_cons = 0;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003527 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003528 }
3529}
3530
3531static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3532{
3533#ifdef CONFIG_RFS_ACCEL
3534 int i;
3535
3536	/* We are under rtnl_lock and all our NAPIs have been disabled, so
3537	 * it is safe to delete the hash table.
3538	 */
3539 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3540 struct hlist_head *head;
3541 struct hlist_node *tmp;
3542 struct bnxt_ntuple_filter *fltr;
3543
3544 head = &bp->ntp_fltr_hash_tbl[i];
3545 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3546 hlist_del(&fltr->hash);
3547 kfree(fltr);
3548 }
3549 }
3550 if (irq_reinit) {
3551 kfree(bp->ntp_fltr_bmap);
3552 bp->ntp_fltr_bmap = NULL;
3553 }
3554 bp->ntp_fltr_count = 0;
3555#endif
3556}
3557
3558static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3559{
3560#ifdef CONFIG_RFS_ACCEL
3561 int i, rc = 0;
3562
3563 if (!(bp->flags & BNXT_FLAG_RFS))
3564 return 0;
3565
3566 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3567 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3568
3569 bp->ntp_fltr_count = 0;
Dan Carpenterac45bd92017-05-06 03:49:01 +03003570 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3571 sizeof(long),
Michael Chanc0c050c2015-10-22 16:01:17 -04003572 GFP_KERNEL);
3573
3574 if (!bp->ntp_fltr_bmap)
3575 rc = -ENOMEM;
3576
3577 return rc;
3578#else
3579 return 0;
3580#endif
3581}
3582
3583static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3584{
3585 bnxt_free_vnic_attributes(bp);
3586 bnxt_free_tx_rings(bp);
3587 bnxt_free_rx_rings(bp);
3588 bnxt_free_cp_rings(bp);
3589 bnxt_free_ntp_fltrs(bp, irq_re_init);
3590 if (irq_re_init) {
Michael Chanfd3ab1c2018-12-16 18:46:30 -05003591 bnxt_free_ring_stats(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04003592 bnxt_free_ring_grps(bp);
3593 bnxt_free_vnics(bp);
Michael Chana960dec2017-02-06 16:55:39 -05003594 kfree(bp->tx_ring_map);
3595 bp->tx_ring_map = NULL;
Michael Chanb6ab4b02016-01-02 23:44:59 -05003596 kfree(bp->tx_ring);
3597 bp->tx_ring = NULL;
3598 kfree(bp->rx_ring);
3599 bp->rx_ring = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04003600 kfree(bp->bnapi);
3601 bp->bnapi = NULL;
3602 } else {
3603 bnxt_clear_ring_indices(bp);
3604 }
3605}
3606
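/* Top-level memory allocator for the data path.  With irq_re_init set
 * it (re)builds the bnapi array (one entry per completion ring), the
 * RX/TX ring arrays and the TX ring map, marks ring memory as
 * PTE-backed on P5 chips, and allocates the statistics, ntuple filter
 * and VNIC bookkeeping.  It then always wires up the ring descriptors
 * via bnxt_init_ring_struct() and allocates the RX, TX and completion
 * rings plus the default VNIC's attributes, unwinding through
 * bnxt_free_mem() on failure.
 */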
3607static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3608{
Michael Chan01657bc2016-01-02 23:45:03 -05003609 int i, j, rc, size, arr_size;
Michael Chanc0c050c2015-10-22 16:01:17 -04003610 void *bnapi;
3611
3612 if (irq_re_init) {
3613 /* Allocate bnapi mem pointer array and mem block for
3614 * all queues
3615 */
3616 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3617 bp->cp_nr_rings);
3618 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3619 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3620 if (!bnapi)
3621 return -ENOMEM;
3622
3623 bp->bnapi = bnapi;
3624 bnapi += arr_size;
3625 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3626 bp->bnapi[i] = bnapi;
3627 bp->bnapi[i]->index = i;
3628 bp->bnapi[i]->bp = bp;
Michael Chane38287b2018-10-14 07:02:45 -04003629 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3630 struct bnxt_cp_ring_info *cpr =
3631 &bp->bnapi[i]->cp_ring;
3632
3633 cpr->cp_ring_struct.ring_mem.flags =
3634 BNXT_RMEM_RING_PTE_FLAG;
3635 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003636 }
3637
Michael Chanb6ab4b02016-01-02 23:44:59 -05003638 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3639 sizeof(struct bnxt_rx_ring_info),
3640 GFP_KERNEL);
3641 if (!bp->rx_ring)
3642 return -ENOMEM;
3643
3644 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chane38287b2018-10-14 07:02:45 -04003645 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3646
3647 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3648 rxr->rx_ring_struct.ring_mem.flags =
3649 BNXT_RMEM_RING_PTE_FLAG;
3650 rxr->rx_agg_ring_struct.ring_mem.flags =
3651 BNXT_RMEM_RING_PTE_FLAG;
3652 }
3653 rxr->bnapi = bp->bnapi[i];
Michael Chanb6ab4b02016-01-02 23:44:59 -05003654 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3655 }
3656
3657 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3658 sizeof(struct bnxt_tx_ring_info),
3659 GFP_KERNEL);
3660 if (!bp->tx_ring)
3661 return -ENOMEM;
3662
Michael Chana960dec2017-02-06 16:55:39 -05003663 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3664 GFP_KERNEL);
3665
3666 if (!bp->tx_ring_map)
3667 return -ENOMEM;
3668
Michael Chan01657bc2016-01-02 23:45:03 -05003669 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3670 j = 0;
3671 else
3672 j = bp->rx_nr_rings;
3673
3674 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
Michael Chane38287b2018-10-14 07:02:45 -04003675 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3676
3677 if (bp->flags & BNXT_FLAG_CHIP_P5)
3678 txr->tx_ring_struct.ring_mem.flags =
3679 BNXT_RMEM_RING_PTE_FLAG;
3680 txr->bnapi = bp->bnapi[j];
3681 bp->bnapi[j]->tx_ring = txr;
Michael Chan5f449242017-02-06 16:55:40 -05003682 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
Michael Chan38413402017-02-06 16:55:43 -05003683 if (i >= bp->tx_nr_rings_xdp) {
Michael Chane38287b2018-10-14 07:02:45 -04003684 txr->txq_index = i - bp->tx_nr_rings_xdp;
Michael Chan38413402017-02-06 16:55:43 -05003685 bp->bnapi[j]->tx_int = bnxt_tx_int;
3686 } else {
Michael Chanfa3e93e2017-02-06 16:55:41 -05003687 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
Michael Chan38413402017-02-06 16:55:43 -05003688 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3689 }
Michael Chanb6ab4b02016-01-02 23:44:59 -05003690 }
3691
Michael Chanc0c050c2015-10-22 16:01:17 -04003692 rc = bnxt_alloc_stats(bp);
3693 if (rc)
3694 goto alloc_mem_err;
3695
3696 rc = bnxt_alloc_ntp_fltrs(bp);
3697 if (rc)
3698 goto alloc_mem_err;
3699
3700 rc = bnxt_alloc_vnics(bp);
3701 if (rc)
3702 goto alloc_mem_err;
3703 }
3704
3705 bnxt_init_ring_struct(bp);
3706
3707 rc = bnxt_alloc_rx_rings(bp);
3708 if (rc)
3709 goto alloc_mem_err;
3710
3711 rc = bnxt_alloc_tx_rings(bp);
3712 if (rc)
3713 goto alloc_mem_err;
3714
3715 rc = bnxt_alloc_cp_rings(bp);
3716 if (rc)
3717 goto alloc_mem_err;
3718
3719 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3720 BNXT_VNIC_UCAST_FLAG;
3721 rc = bnxt_alloc_vnic_attributes(bp);
3722 if (rc)
3723 goto alloc_mem_err;
3724 return 0;
3725
3726alloc_mem_err:
3727 bnxt_free_mem(bp, true);
3728 return rc;
3729}
3730
Michael Chan9d8bc092016-12-29 12:13:33 -05003731static void bnxt_disable_int(struct bnxt *bp)
3732{
3733 int i;
3734
3735 if (!bp->bnapi)
3736 return;
3737
3738 for (i = 0; i < bp->cp_nr_rings; i++) {
3739 struct bnxt_napi *bnapi = bp->bnapi[i];
3740 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
Michael Chandaf1f1e2017-02-20 19:25:17 -05003741 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
Michael Chan9d8bc092016-12-29 12:13:33 -05003742
Michael Chandaf1f1e2017-02-20 19:25:17 -05003743 if (ring->fw_ring_id != INVALID_HW_RING_ID)
Michael Chan697197e2018-10-14 07:02:46 -04003744 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
Michael Chan9d8bc092016-12-29 12:13:33 -05003745 }
3746}
3747
Michael Chane5811b82018-03-31 13:54:18 -04003748static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3749{
3750 struct bnxt_napi *bnapi = bp->bnapi[n];
3751 struct bnxt_cp_ring_info *cpr;
3752
3753 cpr = &bnapi->cp_ring;
3754 return cpr->cp_ring_struct.map_idx;
3755}
3756
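/* Disable interrupts and wait for them to drain: atomically bump
 * bp->intr_sem, write each completion ring's doorbell without
 * re-arming it via bnxt_disable_int(), then synchronize_irq() on every
 * vector so no handler is still running when this returns.
 */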
Michael Chan9d8bc092016-12-29 12:13:33 -05003757static void bnxt_disable_int_sync(struct bnxt *bp)
3758{
3759 int i;
3760
3761 atomic_inc(&bp->intr_sem);
3762
3763 bnxt_disable_int(bp);
Michael Chane5811b82018-03-31 13:54:18 -04003764 for (i = 0; i < bp->cp_nr_rings; i++) {
3765 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3766
3767 synchronize_irq(bp->irq_tbl[map_idx].vector);
3768 }
Michael Chan9d8bc092016-12-29 12:13:33 -05003769}
3770
3771static void bnxt_enable_int(struct bnxt *bp)
3772{
3773 int i;
3774
3775 atomic_set(&bp->intr_sem, 0);
3776 for (i = 0; i < bp->cp_nr_rings; i++) {
3777 struct bnxt_napi *bnapi = bp->bnapi[i];
3778 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3779
Michael Chan697197e2018-10-14 07:02:46 -04003780 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
Michael Chan9d8bc092016-12-29 12:13:33 -05003781 }
3782}
3783
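/* Fill in the common HWRM request header: request type, completion
 * ring, target function and the DMA address of the response buffer
 * (the Kong channel's buffer is used when the request is routed to
 * Kong).  Typical usage in this file, as in bnxt_hwrm_func_drv_unrgtr()
 * below:
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */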
Michael Chanc0c050c2015-10-22 16:01:17 -04003784void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3785 u16 cmpl_ring, u16 target_id)
3786{
Michael Chana8643e12016-02-26 04:00:05 -05003787 struct input *req = request;
Michael Chanc0c050c2015-10-22 16:01:17 -04003788
Michael Chana8643e12016-02-26 04:00:05 -05003789 req->req_type = cpu_to_le16(req_type);
3790 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3791 req->target_id = cpu_to_le16(target_id);
Venkat Duvvuru760b6d32018-12-20 03:38:48 -05003792 if (bnxt_kong_hwrm_message(bp, req))
3793 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
3794 else
3795 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
Michael Chanc0c050c2015-10-22 16:01:17 -04003796}
3797
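/* Send one HWRM request and wait for its response.  The request is
 * optionally repackaged into the short command format (used when the
 * firmware requires it or the request exceeds BNXT_HWRM_MAX_REQ_LEN),
 * copied into the chimp or Kong communication window in BAR0, and
 * kicked off by writing the trigger doorbell.  Completion is then
 * detected either via the response completion interrupt (when a
 * cmpl_ring was given) or by polling the response length and the final
 * "valid" byte, sleeping briefly on the first few passes and longer
 * afterwards.  Returns the firmware error code (0 on success) or -1 on
 * timeout.
 */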
Michael Chanfbfbc482016-02-26 04:00:07 -05003798static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3799 int timeout, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04003800{
Michael Chana11fa2b2016-05-15 03:04:47 -04003801 int i, intr_process, rc, tmo_count;
Michael Chana8643e12016-02-26 04:00:05 -05003802 struct input *req = msg;
Michael Chanc0c050c2015-10-22 16:01:17 -04003803 u32 *data = msg;
Michael Chan845adfe2018-03-31 13:54:15 -04003804 __le32 *resp_len;
3805 u8 *valid;
Michael Chanc0c050c2015-10-22 16:01:17 -04003806 u16 cp_ring_id, len = 0;
3807 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
Deepak Khungare605db82017-05-29 19:06:04 -04003808 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
Vasundhara Volamebd58182017-12-01 03:13:05 -05003809 struct hwrm_short_input short_input = {0};
Venkat Duvvuru2e9ee392018-12-20 03:38:45 -05003810 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
Venkat Duvvuru89455012018-12-20 03:38:46 -05003811 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
Venkat Duvvuru2e9ee392018-12-20 03:38:45 -05003812 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
Venkat Duvvuru760b6d32018-12-20 03:38:48 -05003813 u16 dst = BNXT_HWRM_CHNL_CHIMP;
Michael Chanc0c050c2015-10-22 16:01:17 -04003814
Michael Chan1dfddc42018-10-14 07:02:39 -04003815 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3816 if (msg_len > bp->hwrm_max_ext_req_len ||
3817 !bp->hwrm_short_cmd_req_addr)
3818 return -EINVAL;
3819 }
3820
Venkat Duvvuru760b6d32018-12-20 03:38:48 -05003821 if (bnxt_hwrm_kong_chnl(bp, req)) {
3822 dst = BNXT_HWRM_CHNL_KONG;
3823 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
3824 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
3825 resp = bp->hwrm_cmd_kong_resp_addr;
3826 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
3827 }
3828
3829 memset(resp, 0, PAGE_SIZE);
3830 cp_ring_id = le16_to_cpu(req->cmpl_ring);
3831 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3832
3833 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
3834 /* currently supports only one outstanding message */
3835 if (intr_process)
3836 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3837
Michael Chan1dfddc42018-10-14 07:02:39 -04003838 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
3839 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
Deepak Khungare605db82017-05-29 19:06:04 -04003840 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
Michael Chan1dfddc42018-10-14 07:02:39 -04003841 u16 max_msg_len;
3842
3843		/* Set the boundary for the maximum extended request length for
3844		 * the short cmd format.  If passed up from the device, use the
3845		 * max supported internal req length.
3846		 */
3847 max_msg_len = bp->hwrm_max_ext_req_len;
Deepak Khungare605db82017-05-29 19:06:04 -04003848
3849 memcpy(short_cmd_req, req, msg_len);
Michael Chan1dfddc42018-10-14 07:02:39 -04003850 if (msg_len < max_msg_len)
3851 memset(short_cmd_req + msg_len, 0,
3852 max_msg_len - msg_len);
Deepak Khungare605db82017-05-29 19:06:04 -04003853
3854 short_input.req_type = req->req_type;
3855 short_input.signature =
3856 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3857 short_input.size = cpu_to_le16(msg_len);
3858 short_input.req_addr =
3859 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3860
3861 data = (u32 *)&short_input;
3862 msg_len = sizeof(short_input);
3863
3864 /* Sync memory write before updating doorbell */
3865 wmb();
3866
3867 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3868 }
3869
Michael Chanc0c050c2015-10-22 16:01:17 -04003870 /* Write request msg to hwrm channel */
Venkat Duvvuru2e9ee392018-12-20 03:38:45 -05003871 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
Michael Chanc0c050c2015-10-22 16:01:17 -04003872
Deepak Khungare605db82017-05-29 19:06:04 -04003873 for (i = msg_len; i < max_req_len; i += 4)
Venkat Duvvuru2e9ee392018-12-20 03:38:45 -05003874 writel(0, bp->bar0 + bar_offset + i);
Michael Chand79979a2016-01-07 19:56:57 -05003875
Michael Chanc0c050c2015-10-22 16:01:17 -04003876 /* Ring channel doorbell */
Venkat Duvvuru2e9ee392018-12-20 03:38:45 -05003877 writel(1, bp->bar0 + doorbell_offset);
Michael Chanc0c050c2015-10-22 16:01:17 -04003878
Michael Chanff4fe812016-02-26 04:00:04 -05003879 if (!timeout)
3880 timeout = DFLT_HWRM_CMD_TIMEOUT;
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003881 /* convert timeout to usec */
3882 timeout *= 1000;
Michael Chanff4fe812016-02-26 04:00:04 -05003883
Michael Chanc0c050c2015-10-22 16:01:17 -04003884 i = 0;
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003885 /* Short timeout for the first few iterations:
3886 * number of loops = number of loops for short timeout +
3887 * number of loops for standard timeout.
3888 */
3889 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3890 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3891 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
Venkat Duvvuru89455012018-12-20 03:38:46 -05003892 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
3893
Michael Chanc0c050c2015-10-22 16:01:17 -04003894 if (intr_process) {
Venkat Duvvurufc718bb2018-12-20 03:38:44 -05003895 u16 seq_id = bp->hwrm_intr_seq_id;
3896
Michael Chanc0c050c2015-10-22 16:01:17 -04003897 /* Wait until hwrm response cmpl interrupt is processed */
Venkat Duvvurufc718bb2018-12-20 03:38:44 -05003898 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
Michael Chana11fa2b2016-05-15 03:04:47 -04003899 i++ < tmo_count) {
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003900 /* on first few passes, just barely sleep */
3901 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3902 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3903 HWRM_SHORT_MAX_TIMEOUT);
3904 else
3905 usleep_range(HWRM_MIN_TIMEOUT,
3906 HWRM_MAX_TIMEOUT);
Michael Chanc0c050c2015-10-22 16:01:17 -04003907 }
3908
Venkat Duvvurufc718bb2018-12-20 03:38:44 -05003909 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003910 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
Michael Chana8643e12016-02-26 04:00:05 -05003911 le16_to_cpu(req->req_type));
Michael Chanc0c050c2015-10-22 16:01:17 -04003912 return -1;
3913 }
Michael Chan845adfe2018-03-31 13:54:15 -04003914 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3915 HWRM_RESP_LEN_SFT;
Venkat Duvvuru89455012018-12-20 03:38:46 -05003916 valid = resp_addr + len - 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04003917 } else {
Michael Chancc559c12018-05-08 03:18:38 -04003918 int j;
3919
Michael Chanc0c050c2015-10-22 16:01:17 -04003920 /* Check if response len is updated */
Michael Chana11fa2b2016-05-15 03:04:47 -04003921 for (i = 0; i < tmo_count; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003922 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3923 HWRM_RESP_LEN_SFT;
3924 if (len)
3925 break;
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003926 /* on first few passes, just barely sleep */
Michael Chan67681d02019-02-20 19:07:31 -05003927 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003928 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3929 HWRM_SHORT_MAX_TIMEOUT);
3930 else
3931 usleep_range(HWRM_MIN_TIMEOUT,
3932 HWRM_MAX_TIMEOUT);
Michael Chanc0c050c2015-10-22 16:01:17 -04003933 }
3934
Michael Chana11fa2b2016-05-15 03:04:47 -04003935 if (i >= tmo_count) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003936 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
Michael Chancc559c12018-05-08 03:18:38 -04003937 HWRM_TOTAL_TIMEOUT(i),
3938 le16_to_cpu(req->req_type),
Michael Chan8578d6c2016-05-15 03:04:48 -04003939 le16_to_cpu(req->seq_id), len);
Michael Chanc0c050c2015-10-22 16:01:17 -04003940 return -1;
3941 }
3942
Michael Chan845adfe2018-03-31 13:54:15 -04003943 /* Last byte of resp contains valid bit */
Venkat Duvvuru89455012018-12-20 03:38:46 -05003944 valid = resp_addr + len - 1;
Michael Chancc559c12018-05-08 03:18:38 -04003945 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
Michael Chan845adfe2018-03-31 13:54:15 -04003946 /* make sure we read from updated DMA memory */
3947 dma_rmb();
3948 if (*valid)
Michael Chanc0c050c2015-10-22 16:01:17 -04003949 break;
Michael Chan0000b812019-02-20 19:07:32 -05003950 usleep_range(1, 5);
Michael Chanc0c050c2015-10-22 16:01:17 -04003951 }
3952
Michael Chancc559c12018-05-08 03:18:38 -04003953 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003954 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
Michael Chancc559c12018-05-08 03:18:38 -04003955 HWRM_TOTAL_TIMEOUT(i),
3956 le16_to_cpu(req->req_type),
Michael Chana8643e12016-02-26 04:00:05 -05003957 le16_to_cpu(req->seq_id), len, *valid);
Michael Chanc0c050c2015-10-22 16:01:17 -04003958 return -1;
3959 }
3960 }
3961
Michael Chan845adfe2018-03-31 13:54:15 -04003962	/* Zero the valid bit for compatibility.  The valid bit in an older
3963	 * spec may become a new field in a newer spec.  We must make sure
3964	 * that any new field not implemented by the old spec reads as zero.
3965	 */
3966 *valid = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04003967 rc = le16_to_cpu(resp->error_code);
Michael Chanfbfbc482016-02-26 04:00:07 -05003968 if (rc && !silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04003969 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3970 le16_to_cpu(resp->req_type),
3971 le16_to_cpu(resp->seq_id), rc);
Michael Chanfbfbc482016-02-26 04:00:07 -05003972 return rc;
3973}
3974
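/* Locking convention for the wrappers below: _hwrm_send_message() and
 * _hwrm_send_message_silent() are meant to run with bp->hwrm_cmd_lock
 * held; callers in this file take the lock themselves when they also
 * need to read the response buffer under it.  hwrm_send_message() and
 * hwrm_send_message_silent() take and release the lock internally.
 * The *_silent variants suppress the error message logged on failure.
 */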
3975int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3976{
3977 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
Michael Chanc0c050c2015-10-22 16:01:17 -04003978}
3979
Michael Chancc72f3b2017-10-13 21:09:33 -04003980int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3981 int timeout)
3982{
3983 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3984}
3985
Michael Chanc0c050c2015-10-22 16:01:17 -04003986int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3987{
3988 int rc;
3989
3990 mutex_lock(&bp->hwrm_cmd_lock);
3991 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3992 mutex_unlock(&bp->hwrm_cmd_lock);
3993 return rc;
3994}
3995
Michael Chan90e209212016-02-26 04:00:08 -05003996int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3997 int timeout)
3998{
3999 int rc;
4000
4001 mutex_lock(&bp->hwrm_cmd_lock);
4002 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4003 mutex_unlock(&bp->hwrm_cmd_lock);
4004 return rc;
4005}
4006
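/* Register which asynchronous event completions the firmware should
 * forward to this driver.  The events from bnxt_async_events_arr[],
 * plus any caller-supplied bitmap, are collected into a 256-bit bitmap
 * and handed to the firmware as eight 32-bit words in
 * req.async_event_fwd[].
 */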
Michael Chana1653b12016-12-07 00:26:20 -05004007int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
4008 int bmap_size)
Michael Chanc0c050c2015-10-22 16:01:17 -04004009{
4010 struct hwrm_func_drv_rgtr_input req = {0};
Michael Chan25be8622016-04-05 14:09:00 -04004011 DECLARE_BITMAP(async_events_bmap, 256);
4012 u32 *events = (u32 *)async_events_bmap;
Michael Chana1653b12016-12-07 00:26:20 -05004013 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04004014
4015 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4016
4017 req.enables =
Michael Chana1653b12016-12-07 00:26:20 -05004018 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
Michael Chanc0c050c2015-10-22 16:01:17 -04004019
Michael Chan25be8622016-04-05 14:09:00 -04004020 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4021 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
4022 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4023
Michael Chana1653b12016-12-07 00:26:20 -05004024 if (bmap && bmap_size) {
4025 for (i = 0; i < bmap_size; i++) {
4026 if (test_bit(i, bmap))
4027 __set_bit(i, async_events_bmap);
4028 }
4029 }
4030
Michael Chan25be8622016-04-05 14:09:00 -04004031 for (i = 0; i < 8; i++)
4032 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4033
Michael Chana1653b12016-12-07 00:26:20 -05004034 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4035}
4036
4037static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4038{
Michael Chan25e1acd2018-08-05 16:51:55 -04004039 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chana1653b12016-12-07 00:26:20 -05004040 struct hwrm_func_drv_rgtr_input req = {0};
Michael Chan25e1acd2018-08-05 16:51:55 -04004041 int rc;
Michael Chana1653b12016-12-07 00:26:20 -05004042
4043 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4044
4045 req.enables =
4046 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4047 FUNC_DRV_RGTR_REQ_ENABLES_VER);
4048
Michael Chan11f15ed2016-04-05 14:08:55 -04004049 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
Michael Chand4f52de02018-03-31 13:54:06 -04004050 req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4051 req.ver_maj_8b = DRV_VER_MAJ;
4052 req.ver_min_8b = DRV_VER_MIN;
4053 req.ver_upd_8b = DRV_VER_UPD;
4054 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4055 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4056 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
Michael Chanc0c050c2015-10-22 16:01:17 -04004057
4058 if (BNXT_PF(bp)) {
Michael Chan9b0436c2017-07-11 13:05:36 -04004059 u32 data[8];
Michael Chana1653b12016-12-07 00:26:20 -05004060 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04004061
Michael Chan9b0436c2017-07-11 13:05:36 -04004062 memset(data, 0, sizeof(data));
4063 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4064 u16 cmd = bnxt_vf_req_snif[i];
4065 unsigned int bit, idx;
4066
4067 idx = cmd / 32;
4068 bit = cmd % 32;
4069 data[idx] |= 1 << bit;
4070 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004071
Michael Chande68f5de2015-12-09 19:35:41 -05004072 for (i = 0; i < 8; i++)
4073 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4074
Michael Chanc0c050c2015-10-22 16:01:17 -04004075 req.enables |=
4076 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4077 }
4078
Venkat Duvvuruabd43a12018-12-20 03:38:52 -05004079 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4080 req.flags |= cpu_to_le32(
4081 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4082
Michael Chan25e1acd2018-08-05 16:51:55 -04004083 mutex_lock(&bp->hwrm_cmd_lock);
4084 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4085 if (rc)
4086 rc = -EIO;
4087 else if (resp->flags &
4088 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4089 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4090 mutex_unlock(&bp->hwrm_cmd_lock);
4091 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04004092}
4093
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05004094static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4095{
4096 struct hwrm_func_drv_unrgtr_input req = {0};
4097
4098 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4099 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4100}
4101
Michael Chanc0c050c2015-10-22 16:01:17 -04004102static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4103{
4104 u32 rc = 0;
4105 struct hwrm_tunnel_dst_port_free_input req = {0};
4106
4107 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4108 req.tunnel_type = tunnel_type;
4109
4110 switch (tunnel_type) {
4111 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4112 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4113 break;
4114 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4115 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4116 break;
4117 default:
4118 break;
4119 }
4120
4121 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4122 if (rc)
4123 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4124 rc);
4125 return rc;
4126}
4127
4128static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4129 u8 tunnel_type)
4130{
4131 u32 rc = 0;
4132 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4133 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4134
4135 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4136
4137 req.tunnel_type = tunnel_type;
4138 req.tunnel_dst_port_val = port;
4139
4140 mutex_lock(&bp->hwrm_cmd_lock);
4141 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4142 if (rc) {
4143 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4144 rc);
4145 goto err_out;
4146 }
4147
Christophe Jaillet57aac712016-11-22 06:14:40 +01004148 switch (tunnel_type) {
4149 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
Michael Chanc0c050c2015-10-22 16:01:17 -04004150 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01004151 break;
4152 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
Michael Chanc0c050c2015-10-22 16:01:17 -04004153 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01004154 break;
4155 default:
4156 break;
4157 }
4158
Michael Chanc0c050c2015-10-22 16:01:17 -04004159err_out:
4160 mutex_unlock(&bp->hwrm_cmd_lock);
4161 return rc;
4162}
4163
4164static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4165{
4166 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4167 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4168
4169 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
Michael Chanc1935542015-12-27 18:19:28 -05004170 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004171
4172 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4173 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4174 req.mask = cpu_to_le32(vnic->rx_mask);
4175 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4176}
4177
4178#ifdef CONFIG_RFS_ACCEL
4179static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4180 struct bnxt_ntuple_filter *fltr)
4181{
4182 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4183
4184 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4185 req.ntuple_filter_id = fltr->filter_id;
4186 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4187}
4188
4189#define BNXT_NTP_FLTR_FLAGS \
4190 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4191 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4192 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4193 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4194 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4195 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4196 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4197 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4198 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4199 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4200 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4201 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4202 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
Michael Chanc1935542015-12-27 18:19:28 -05004203 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
Michael Chanc0c050c2015-10-22 16:01:17 -04004204
Michael Chan61aad722017-02-12 19:18:14 -05004205#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4206 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4207
Michael Chanc0c050c2015-10-22 16:01:17 -04004208static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4209 struct bnxt_ntuple_filter *fltr)
4210{
Michael Chanc0c050c2015-10-22 16:01:17 -04004211 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
Venkat Duvvuru5c209fc2018-12-20 03:38:47 -05004212 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4213 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4214 struct flow_keys *keys = &fltr->fkeys;
4215 int rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04004216
4217 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
Michael Chana54c4d72016-07-25 12:33:35 -04004218 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04004219
4220 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4221
4222 req.ethertype = htons(ETH_P_IP);
4223 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
Michael Chanc1935542015-12-27 18:19:28 -05004224 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
Michael Chanc0c050c2015-10-22 16:01:17 -04004225 req.ip_protocol = keys->basic.ip_proto;
4226
Michael Chandda0e742016-12-29 12:13:40 -05004227 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4228 int i;
4229
4230 req.ethertype = htons(ETH_P_IPV6);
4231 req.ip_addr_type =
4232 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4233 *(struct in6_addr *)&req.src_ipaddr[0] =
4234 keys->addrs.v6addrs.src;
4235 *(struct in6_addr *)&req.dst_ipaddr[0] =
4236 keys->addrs.v6addrs.dst;
4237 for (i = 0; i < 4; i++) {
4238 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4239 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4240 }
4241 } else {
4242 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4243 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4244 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4245 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4246 }
Michael Chan61aad722017-02-12 19:18:14 -05004247 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4248 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4249 req.tunnel_type =
4250 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4251 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004252
4253 req.src_port = keys->ports.src;
4254 req.src_port_mask = cpu_to_be16(0xffff);
4255 req.dst_port = keys->ports.dst;
4256 req.dst_port_mask = cpu_to_be16(0xffff);
4257
Michael Chanc1935542015-12-27 18:19:28 -05004258 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004259 mutex_lock(&bp->hwrm_cmd_lock);
4260 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
Venkat Duvvuru5c209fc2018-12-20 03:38:47 -05004261 if (!rc) {
4262 resp = bnxt_get_hwrm_resp_addr(bp, &req);
Michael Chanc0c050c2015-10-22 16:01:17 -04004263 fltr->filter_id = resp->ntuple_filter_id;
Venkat Duvvuru5c209fc2018-12-20 03:38:47 -05004264 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004265 mutex_unlock(&bp->hwrm_cmd_lock);
4266 return rc;
4267}
4268#endif
4269
4270static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4271 u8 *mac_addr)
4272{
4273 u32 rc = 0;
4274 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4275 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4276
4277 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004278 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4279 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4280 req.flags |=
4281 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
Michael Chanc1935542015-12-27 18:19:28 -05004282 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004283 req.enables =
4284 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
Michael Chanc1935542015-12-27 18:19:28 -05004285 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
Michael Chanc0c050c2015-10-22 16:01:17 -04004286 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4287 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4288 req.l2_addr_mask[0] = 0xff;
4289 req.l2_addr_mask[1] = 0xff;
4290 req.l2_addr_mask[2] = 0xff;
4291 req.l2_addr_mask[3] = 0xff;
4292 req.l2_addr_mask[4] = 0xff;
4293 req.l2_addr_mask[5] = 0xff;
4294
4295 mutex_lock(&bp->hwrm_cmd_lock);
4296 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4297 if (!rc)
4298 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4299 resp->l2_filter_id;
4300 mutex_unlock(&bp->hwrm_cmd_lock);
4301 return rc;
4302}
4303
4304static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4305{
4306 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4307 int rc = 0;
4308
4309 /* Any associated ntuple filters will also be cleared by firmware. */
4310 mutex_lock(&bp->hwrm_cmd_lock);
4311 for (i = 0; i < num_of_vnics; i++) {
4312 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4313
4314 for (j = 0; j < vnic->uc_filter_count; j++) {
4315 struct hwrm_cfa_l2_filter_free_input req = {0};
4316
4317 bnxt_hwrm_cmd_hdr_init(bp, &req,
4318 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4319
4320 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4321
4322 rc = _hwrm_send_message(bp, &req, sizeof(req),
4323 HWRM_CMD_TIMEOUT);
4324 }
4325 vnic->uc_filter_count = 0;
4326 }
4327 mutex_unlock(&bp->hwrm_cmd_lock);
4328
4329 return rc;
4330}
4331
4332static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4333{
4334 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4335 struct hwrm_vnic_tpa_cfg_input req = {0};
4336
Michael Chan3c4fe802018-03-09 23:46:10 -05004337 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4338 return 0;
4339
Michael Chanc0c050c2015-10-22 16:01:17 -04004340 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4341
4342 if (tpa_flags) {
4343 u16 mss = bp->dev->mtu - 40;
4344 u32 nsegs, n, segs = 0, flags;
4345
4346 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4347 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4348 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4349 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4350 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4351 if (tpa_flags & BNXT_FLAG_GRO)
4352 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4353
4354 req.flags = cpu_to_le32(flags);
4355
4356 req.enables =
4357 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
Michael Chanc1935542015-12-27 18:19:28 -05004358 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4359 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04004360
4361		/* The number of segs is in log2 units, and the first packet is
4362		 * not included in these units.
4363 */
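		/* Illustrative numbers (config dependent): with a 1500 byte MTU,
		 * mss = 1500 - 40 = 1460 <= BNXT_RX_PAGE_SIZE (4096 with 4K pages),
		 * so n = 4096 / 1460 = 2 and, assuming MAX_SKB_FRAGS == 17,
		 * nsegs = (17 - 1) * 2 = 32 and max_agg_segs = ilog2(32) = 5.
		 */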
Michael Chan2839f282016-04-25 02:30:50 -04004364 if (mss <= BNXT_RX_PAGE_SIZE) {
4365 n = BNXT_RX_PAGE_SIZE / mss;
Michael Chanc0c050c2015-10-22 16:01:17 -04004366 nsegs = (MAX_SKB_FRAGS - 1) * n;
4367 } else {
Michael Chan2839f282016-04-25 02:30:50 -04004368 n = mss / BNXT_RX_PAGE_SIZE;
4369 if (mss & (BNXT_RX_PAGE_SIZE - 1))
Michael Chanc0c050c2015-10-22 16:01:17 -04004370 n++;
4371 nsegs = (MAX_SKB_FRAGS - n) / n;
4372 }
4373
4374 segs = ilog2(nsegs);
4375 req.max_agg_segs = cpu_to_le16(segs);
4376 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
Michael Chanc1935542015-12-27 18:19:28 -05004377
4378 req.min_agg_len = cpu_to_le32(512);
Michael Chanc0c050c2015-10-22 16:01:17 -04004379 }
4380 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4381
4382 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4383}
4384
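/* Helpers to look up the completion ring servicing an RX or TX ring. On
 * P5 chips each RX/TX ring has its own completion ring in the NAPI's
 * cp_ring_arr, hanging off the NQ; on older chips the completion ring is
 * shared by the ring group.
 */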
Michael Chan2c61d212018-10-14 07:02:50 -04004385static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4386{
4387 struct bnxt_ring_grp_info *grp_info;
4388
4389 grp_info = &bp->grp_info[ring->grp_idx];
4390 return grp_info->cp_fw_ring_id;
4391}
4392
4393static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4394{
4395 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4396 struct bnxt_napi *bnapi = rxr->bnapi;
4397 struct bnxt_cp_ring_info *cpr;
4398
4399 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4400 return cpr->cp_ring_struct.fw_ring_id;
4401 } else {
4402 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4403 }
4404}
4405
4406static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4407{
4408 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4409 struct bnxt_napi *bnapi = txr->bnapi;
4410 struct bnxt_cp_ring_info *cpr;
4411
4412 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4413 return cpr->cp_ring_struct.fw_ring_id;
4414 } else {
4415 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4416 }
4417}
4418
Michael Chanc0c050c2015-10-22 16:01:17 -04004419static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4420{
4421 u32 i, j, max_rings;
4422 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4423 struct hwrm_vnic_rss_cfg_input req = {0};
4424
Michael Chan7b3af4f2018-10-14 07:02:54 -04004425 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4426 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
Michael Chanc0c050c2015-10-22 16:01:17 -04004427 return 0;
4428
4429 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4430 if (set_rss) {
Michael Chan87da7f72016-11-16 21:13:09 -05004431 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
Michael Chan50f011b2018-08-05 16:51:51 -04004432 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004433 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4434 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4435 max_rings = bp->rx_nr_rings - 1;
4436 else
4437 max_rings = bp->rx_nr_rings;
4438 } else {
Michael Chanc0c050c2015-10-22 16:01:17 -04004439 max_rings = 1;
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004440 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004441
4442 /* Fill the RSS indirection table with ring group ids */
4443 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4444 if (j == max_rings)
4445 j = 0;
4446 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4447 }
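		/* e.g. with 4 RX rings the table simply repeats
		 * fw_grp_ids[0..3] in order.
		 */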
4448
4449 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4450 req.hash_key_tbl_addr =
4451 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4452 }
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004453 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
Michael Chanc0c050c2015-10-22 16:01:17 -04004454 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4455}
4456
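/* P5 RSS: the indirection table holds (rx ring id, cp ring id) pairs and
 * each RSS context covers 64 pairs, so the rings are programmed in chunks
 * of 64 across nr_ctxs contexts (e.g. 8 RX rings need 1 context, 100 need 2).
 */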
Michael Chan7b3af4f2018-10-14 07:02:54 -04004457static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4458{
4459 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4460 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4461 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4462 struct hwrm_vnic_rss_cfg_input req = {0};
4463
4464 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4465 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4466 if (!set_rss) {
4467 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4468 return 0;
4469 }
4470 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4471 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4472 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4473 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4474 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4475 for (i = 0, k = 0; i < nr_ctxs; i++) {
4476 __le16 *ring_tbl = vnic->rss_table;
4477 int rc;
4478
4479 req.ring_table_pair_index = i;
4480 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4481 for (j = 0; j < 64; j++) {
4482 u16 ring_id;
4483
4484 ring_id = rxr->rx_ring_struct.fw_ring_id;
4485 *ring_tbl++ = cpu_to_le16(ring_id);
4486 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4487 *ring_tbl++ = cpu_to_le16(ring_id);
4488 rxr++;
4489 k++;
4490 if (k == max_rings) {
4491 k = 0;
4492 rxr = &bp->rx_ring[0];
4493 }
4494 }
4495 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4496 if (rc)
4497 return -EIO;
4498 }
4499 return 0;
4500}
4501
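/* Configure RX buffer placement for the VNIC: jumbo placement plus IPv4/IPv6
 * header-data split, with both thresholds set to the copy-break size (the
 * firmware does not honor the threshold values yet, as noted below).
 */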
Michael Chanc0c050c2015-10-22 16:01:17 -04004502static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4503{
4504 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4505 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4506
4507 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4508 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4509 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4510 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4511 req.enables =
4512 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4513 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4514 /* thresholds not implemented in firmware yet */
4515 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4516 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4517 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4518 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4519}
4520
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004521static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4522 u16 ctx_idx)
Michael Chanc0c050c2015-10-22 16:01:17 -04004523{
4524 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4525
4526 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4527 req.rss_cos_lb_ctx_id =
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004528 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
Michael Chanc0c050c2015-10-22 16:01:17 -04004529
4530 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004531 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04004532}
4533
4534static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4535{
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004536 int i, j;
Michael Chanc0c050c2015-10-22 16:01:17 -04004537
4538 for (i = 0; i < bp->nr_vnics; i++) {
4539 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4540
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004541 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4542 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4543 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4544 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004545 }
4546 bp->rsscos_nr_ctxs = 0;
4547}
4548
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004549static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
Michael Chanc0c050c2015-10-22 16:01:17 -04004550{
4551 int rc;
4552 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4553 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4554 bp->hwrm_cmd_resp_addr;
4555
4556 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4557 -1);
4558
4559 mutex_lock(&bp->hwrm_cmd_lock);
4560 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4561 if (!rc)
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004562 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
Michael Chanc0c050c2015-10-22 16:01:17 -04004563 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4564 mutex_unlock(&bp->hwrm_cmd_lock);
4565
4566 return rc;
4567}
4568
Michael Chanabe93ad2018-03-31 13:54:08 -04004569static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4570{
4571 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4572 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4573 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4574}
4575
Michael Chana588e452016-12-07 00:26:21 -05004576int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
Michael Chanc0c050c2015-10-22 16:01:17 -04004577{
Michael Chanb81a90d2016-01-02 23:45:01 -05004578 unsigned int ring = 0, grp_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04004579 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4580 struct hwrm_vnic_cfg_input req = {0};
Michael Chancf6645f2016-06-13 02:25:28 -04004581 u16 def_vlan = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04004582
4583 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004584
Michael Chan7b3af4f2018-10-14 07:02:54 -04004585 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4586 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4587
4588 req.default_rx_ring_id =
4589 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4590 req.default_cmpl_ring_id =
4591 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4592 req.enables =
4593 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4594 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4595 goto vnic_mru;
4596 }
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004597 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
4598	/* Only RSS is supported for now; COS and LB are TBD */
4599 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4600 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4601 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4602 VNIC_CFG_REQ_ENABLES_MRU);
Michael Chanae10ae72016-12-29 12:13:38 -05004603 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4604 req.rss_rule =
4605 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4606 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4607 VNIC_CFG_REQ_ENABLES_MRU);
4608 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004609 } else {
4610 req.rss_rule = cpu_to_le16(0xffff);
4611 }
4612
4613 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4614 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004615 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4616 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4617 } else {
4618 req.cos_rule = cpu_to_le16(0xffff);
4619 }
4620
Michael Chanc0c050c2015-10-22 16:01:17 -04004621 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
Michael Chanb81a90d2016-01-02 23:45:01 -05004622 ring = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04004623 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
Michael Chanb81a90d2016-01-02 23:45:01 -05004624 ring = vnic_id - 1;
Prashant Sreedharan76595192016-07-18 07:15:22 -04004625 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4626 ring = bp->rx_nr_rings - 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04004627
Michael Chanb81a90d2016-01-02 23:45:01 -05004628 grp_idx = bp->rx_ring[ring].bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04004629 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004630 req.lb_rule = cpu_to_le16(0xffff);
Michael Chan7b3af4f2018-10-14 07:02:54 -04004631vnic_mru:
Michael Chanc0c050c2015-10-22 16:01:17 -04004632 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4633 VLAN_HLEN);
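	/* e.g. a 1500 byte MTU gives an MRU of 1500 + 14 + 4 + 4 = 1522 */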
4634
Michael Chan7b3af4f2018-10-14 07:02:54 -04004635 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
Michael Chancf6645f2016-06-13 02:25:28 -04004636#ifdef CONFIG_BNXT_SRIOV
4637 if (BNXT_VF(bp))
4638 def_vlan = bp->vf.vlan;
4639#endif
4640 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
Michael Chanc0c050c2015-10-22 16:01:17 -04004641 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
Michael Chana588e452016-12-07 00:26:21 -05004642 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
Michael Chanabe93ad2018-03-31 13:54:08 -04004643 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
Michael Chanc0c050c2015-10-22 16:01:17 -04004644
4645 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4646}
4647
4648static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4649{
4650 u32 rc = 0;
4651
4652 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4653 struct hwrm_vnic_free_input req = {0};
4654
4655 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4656 req.vnic_id =
4657 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4658
4659 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4660 if (rc)
4661 return rc;
4662 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4663 }
4664 return rc;
4665}
4666
4667static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4668{
4669 u16 i;
4670
4671 for (i = 0; i < bp->nr_vnics; i++)
4672 bnxt_hwrm_vnic_free_one(bp, i);
4673}
4674
Michael Chanb81a90d2016-01-02 23:45:01 -05004675static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4676 unsigned int start_rx_ring_idx,
4677 unsigned int nr_rings)
Michael Chanc0c050c2015-10-22 16:01:17 -04004678{
Michael Chanb81a90d2016-01-02 23:45:01 -05004679 int rc = 0;
4680 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04004681 struct hwrm_vnic_alloc_input req = {0};
4682 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan44c6f722018-10-14 07:02:53 -04004683 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4684
4685 if (bp->flags & BNXT_FLAG_CHIP_P5)
4686 goto vnic_no_ring_grps;
Michael Chanc0c050c2015-10-22 16:01:17 -04004687
4688 /* map ring groups to this vnic */
Michael Chanb81a90d2016-01-02 23:45:01 -05004689 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4690 grp_idx = bp->rx_ring[i].bnapi->index;
4691 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004692 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
Michael Chanb81a90d2016-01-02 23:45:01 -05004693 j, nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04004694 break;
4695 }
Michael Chan44c6f722018-10-14 07:02:53 -04004696 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04004697 }
4698
Michael Chan44c6f722018-10-14 07:02:53 -04004699vnic_no_ring_grps:
4700 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4701 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04004702 if (vnic_id == 0)
4703 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4704
4705 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4706
4707 mutex_lock(&bp->hwrm_cmd_lock);
4708 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4709 if (!rc)
Michael Chan44c6f722018-10-14 07:02:53 -04004710 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004711 mutex_unlock(&bp->hwrm_cmd_lock);
4712 return rc;
4713}
4714
Michael Chan8fdefd62016-12-29 12:13:36 -05004715static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4716{
4717 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4718 struct hwrm_vnic_qcaps_input req = {0};
4719 int rc;
4720
4721 if (bp->hwrm_spec_code < 0x10600)
4722 return 0;
4723
4724 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4725 mutex_lock(&bp->hwrm_cmd_lock);
4726 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4727 if (!rc) {
Michael Chanabe93ad2018-03-31 13:54:08 -04004728 u32 flags = le32_to_cpu(resp->flags);
4729
Michael Chan41e8d792018-10-14 07:02:48 -04004730 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4731 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
Michael Chan8fdefd62016-12-29 12:13:36 -05004732 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
Michael Chanabe93ad2018-03-31 13:54:08 -04004733 if (flags &
4734 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4735 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
Michael Chan8fdefd62016-12-29 12:13:36 -05004736 }
4737 mutex_unlock(&bp->hwrm_cmd_lock);
4738 return rc;
4739}
4740
Michael Chanc0c050c2015-10-22 16:01:17 -04004741static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4742{
4743 u16 i;
4744 u32 rc = 0;
4745
Michael Chan44c6f722018-10-14 07:02:53 -04004746 if (bp->flags & BNXT_FLAG_CHIP_P5)
4747 return 0;
4748
Michael Chanc0c050c2015-10-22 16:01:17 -04004749 mutex_lock(&bp->hwrm_cmd_lock);
4750 for (i = 0; i < bp->rx_nr_rings; i++) {
4751 struct hwrm_ring_grp_alloc_input req = {0};
4752 struct hwrm_ring_grp_alloc_output *resp =
4753 bp->hwrm_cmd_resp_addr;
Michael Chanb81a90d2016-01-02 23:45:01 -05004754 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04004755
4756 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4757
Michael Chanb81a90d2016-01-02 23:45:01 -05004758 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4759 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4760 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4761 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
Michael Chanc0c050c2015-10-22 16:01:17 -04004762
4763 rc = _hwrm_send_message(bp, &req, sizeof(req),
4764 HWRM_CMD_TIMEOUT);
4765 if (rc)
4766 break;
4767
Michael Chanb81a90d2016-01-02 23:45:01 -05004768 bp->grp_info[grp_idx].fw_grp_id =
4769 le32_to_cpu(resp->ring_group_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004770 }
4771 mutex_unlock(&bp->hwrm_cmd_lock);
4772 return rc;
4773}
4774
4775static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4776{
4777 u16 i;
4778 u32 rc = 0;
4779 struct hwrm_ring_grp_free_input req = {0};
4780
Michael Chan44c6f722018-10-14 07:02:53 -04004781 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
Michael Chanc0c050c2015-10-22 16:01:17 -04004782 return 0;
4783
4784 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4785
4786 mutex_lock(&bp->hwrm_cmd_lock);
4787 for (i = 0; i < bp->cp_nr_rings; i++) {
4788 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4789 continue;
4790 req.ring_group_id =
4791 cpu_to_le32(bp->grp_info[i].fw_grp_id);
4792
4793 rc = _hwrm_send_message(bp, &req, sizeof(req),
4794 HWRM_CMD_TIMEOUT);
4795 if (rc)
4796 break;
4797 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4798 }
4799 mutex_unlock(&bp->hwrm_cmd_lock);
4800 return rc;
4801}
4802
4803static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4804 struct bnxt_ring_struct *ring,
Michael Chan9899bb52018-03-31 13:54:16 -04004805 u32 ring_type, u32 map_index)
Michael Chanc0c050c2015-10-22 16:01:17 -04004806{
4807 int rc = 0, err = 0;
4808 struct hwrm_ring_alloc_input req = {0};
4809 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan6fe19882018-10-14 07:02:41 -04004810 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
Michael Chan9899bb52018-03-31 13:54:16 -04004811 struct bnxt_ring_grp_info *grp_info;
Michael Chanc0c050c2015-10-22 16:01:17 -04004812 u16 ring_id;
4813
4814 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4815
4816 req.enables = 0;
Michael Chan6fe19882018-10-14 07:02:41 -04004817 if (rmem->nr_pages > 1) {
4818 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
Michael Chanc0c050c2015-10-22 16:01:17 -04004819 /* Page size is in log2 units */
4820 req.page_size = BNXT_PAGE_SHIFT;
4821 req.page_tbl_depth = 1;
4822 } else {
Michael Chan6fe19882018-10-14 07:02:41 -04004823 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
Michael Chanc0c050c2015-10-22 16:01:17 -04004824 }
4825 req.fbo = 0;
4826 /* Association of ring index with doorbell index and MSIX number */
4827 req.logical_id = cpu_to_le16(map_index);
4828
4829 switch (ring_type) {
Michael Chan2c61d212018-10-14 07:02:50 -04004830 case HWRM_RING_ALLOC_TX: {
4831 struct bnxt_tx_ring_info *txr;
4832
4833 txr = container_of(ring, struct bnxt_tx_ring_info,
4834 tx_ring_struct);
Michael Chanc0c050c2015-10-22 16:01:17 -04004835 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4836 /* Association of transmit ring with completion ring */
Michael Chan9899bb52018-03-31 13:54:16 -04004837 grp_info = &bp->grp_info[ring->grp_idx];
Michael Chan2c61d212018-10-14 07:02:50 -04004838 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
Michael Chanc0c050c2015-10-22 16:01:17 -04004839 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
Michael Chan9899bb52018-03-31 13:54:16 -04004840 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
Michael Chanc0c050c2015-10-22 16:01:17 -04004841 req.queue_id = cpu_to_le16(ring->queue_id);
4842 break;
Michael Chan2c61d212018-10-14 07:02:50 -04004843 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004844 case HWRM_RING_ALLOC_RX:
4845 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4846 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
Michael Chan23aefdd2018-10-14 07:02:51 -04004847 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4848 u16 flags = 0;
4849
4850 /* Association of rx ring with stats context */
4851 grp_info = &bp->grp_info[ring->grp_idx];
4852 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
4853 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4854 req.enables |= cpu_to_le32(
4855 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4856 if (NET_IP_ALIGN == 2)
4857 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
4858 req.flags = cpu_to_le16(flags);
4859 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004860 break;
4861 case HWRM_RING_ALLOC_AGG:
Michael Chan23aefdd2018-10-14 07:02:51 -04004862 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4863 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
4864 /* Association of agg ring with rx ring */
4865 grp_info = &bp->grp_info[ring->grp_idx];
4866 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
4867 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
4868 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4869 req.enables |= cpu_to_le32(
4870 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
4871 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4872 } else {
4873 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4874 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004875 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4876 break;
4877 case HWRM_RING_ALLOC_CMPL:
Michael Chanbac9a7e2017-02-12 19:18:10 -05004878 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
Michael Chanc0c050c2015-10-22 16:01:17 -04004879 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
Michael Chan23aefdd2018-10-14 07:02:51 -04004880 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4881 /* Association of cp ring with nq */
4882 grp_info = &bp->grp_info[map_index];
4883 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
4884 req.cq_handle = cpu_to_le64(ring->handle);
4885 req.enables |= cpu_to_le32(
4886 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
4887 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
4888 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4889 }
4890 break;
4891 case HWRM_RING_ALLOC_NQ:
4892 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
4893 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
Michael Chanc0c050c2015-10-22 16:01:17 -04004894 if (bp->flags & BNXT_FLAG_USING_MSIX)
4895 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4896 break;
4897 default:
4898 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4899 ring_type);
4900		return -EINVAL;
4901 }
4902
4903 mutex_lock(&bp->hwrm_cmd_lock);
4904 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4905 err = le16_to_cpu(resp->error_code);
4906 ring_id = le16_to_cpu(resp->ring_id);
4907 mutex_unlock(&bp->hwrm_cmd_lock);
4908
4909 if (rc || err) {
Michael Chan2727c882018-04-26 17:44:35 -04004910 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4911 ring_type, rc, err);
4912 return -EIO;
Michael Chanc0c050c2015-10-22 16:01:17 -04004913 }
4914 ring->fw_ring_id = ring_id;
4915 return rc;
4916}
4917
Michael Chan486b5c22016-12-29 12:13:42 -05004918static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4919{
4920 int rc;
4921
4922 if (BNXT_PF(bp)) {
4923 struct hwrm_func_cfg_input req = {0};
4924
4925 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4926 req.fid = cpu_to_le16(0xffff);
4927 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4928 req.async_event_cr = cpu_to_le16(idx);
4929 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4930 } else {
4931 struct hwrm_func_vf_cfg_input req = {0};
4932
4933 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4934 req.enables =
4935 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4936 req.async_event_cr = cpu_to_le16(idx);
4937 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4938 }
4939 return rc;
4940}
4941
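/* Record the doorbell address and key for a newly allocated ring. P5 chips
 * use a shared 64-bit doorbell region (offset 0x10000 for the PF, 0x4000 for
 * a VF) with the ring id encoded at DBR_XID_SFT; e.g. a TX ring with fw id 5
 * gets DBR_PATH_L2 | DBR_TYPE_SQ | (5ULL << DBR_XID_SFT). Older chips use a
 * 32-bit doorbell per MSI-X vector at a 0x80 stride.
 */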
Michael Chan697197e2018-10-14 07:02:46 -04004942static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
4943 u32 map_idx, u32 xid)
4944{
4945 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4946 if (BNXT_PF(bp))
4947 db->doorbell = bp->bar1 + 0x10000;
4948 else
4949 db->doorbell = bp->bar1 + 0x4000;
4950 switch (ring_type) {
4951 case HWRM_RING_ALLOC_TX:
4952 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
4953 break;
4954 case HWRM_RING_ALLOC_RX:
4955 case HWRM_RING_ALLOC_AGG:
4956 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
4957 break;
4958 case HWRM_RING_ALLOC_CMPL:
4959 db->db_key64 = DBR_PATH_L2;
4960 break;
4961 case HWRM_RING_ALLOC_NQ:
4962 db->db_key64 = DBR_PATH_L2;
4963 break;
4964 }
4965 db->db_key64 |= (u64)xid << DBR_XID_SFT;
4966 } else {
4967 db->doorbell = bp->bar1 + map_idx * 0x80;
4968 switch (ring_type) {
4969 case HWRM_RING_ALLOC_TX:
4970 db->db_key32 = DB_KEY_TX;
4971 break;
4972 case HWRM_RING_ALLOC_RX:
4973 case HWRM_RING_ALLOC_AGG:
4974 db->db_key32 = DB_KEY_RX;
4975 break;
4976 case HWRM_RING_ALLOC_CMPL:
4977 db->db_key32 = DB_KEY_CP;
4978 break;
4979 }
4980 }
4981}
4982
Michael Chanc0c050c2015-10-22 16:01:17 -04004983static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4984{
4985 int i, rc = 0;
Michael Chan697197e2018-10-14 07:02:46 -04004986 u32 type;
Michael Chanc0c050c2015-10-22 16:01:17 -04004987
Michael Chan23aefdd2018-10-14 07:02:51 -04004988 if (bp->flags & BNXT_FLAG_CHIP_P5)
4989 type = HWRM_RING_ALLOC_NQ;
4990 else
4991 type = HWRM_RING_ALLOC_CMPL;
Michael Chanedd0c2c2015-12-27 18:19:19 -05004992 for (i = 0; i < bp->cp_nr_rings; i++) {
4993 struct bnxt_napi *bnapi = bp->bnapi[i];
4994 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4995 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
Michael Chan9899bb52018-03-31 13:54:16 -04004996 u32 map_idx = ring->map_idx;
Michael Chan5e66e352019-01-31 14:31:48 -05004997 unsigned int vector;
Michael Chanc0c050c2015-10-22 16:01:17 -04004998
Michael Chan5e66e352019-01-31 14:31:48 -05004999 vector = bp->irq_tbl[map_idx].vector;
5000 disable_irq_nosync(vector);
Michael Chan697197e2018-10-14 07:02:46 -04005001 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
Michael Chan5e66e352019-01-31 14:31:48 -05005002 if (rc) {
5003 enable_irq(vector);
Michael Chanedd0c2c2015-12-27 18:19:19 -05005004 goto err_out;
Michael Chan5e66e352019-01-31 14:31:48 -05005005 }
Michael Chan697197e2018-10-14 07:02:46 -04005006 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5007 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
Michael Chan5e66e352019-01-31 14:31:48 -05005008 enable_irq(vector);
Michael Chanedd0c2c2015-12-27 18:19:19 -05005009 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
Michael Chan486b5c22016-12-29 12:13:42 -05005010
5011 if (!i) {
5012 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5013 if (rc)
5014 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5015 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005016 }
5017
Michael Chan697197e2018-10-14 07:02:46 -04005018 type = HWRM_RING_ALLOC_TX;
Michael Chanedd0c2c2015-12-27 18:19:19 -05005019 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005020 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chan3e08b182018-10-14 07:02:52 -04005021 struct bnxt_ring_struct *ring;
5022 u32 map_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04005023
Michael Chan3e08b182018-10-14 07:02:52 -04005024 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5025 struct bnxt_napi *bnapi = txr->bnapi;
5026 struct bnxt_cp_ring_info *cpr, *cpr2;
5027 u32 type2 = HWRM_RING_ALLOC_CMPL;
5028
5029 cpr = &bnapi->cp_ring;
5030 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5031 ring = &cpr2->cp_ring_struct;
5032 ring->handle = BNXT_TX_HDL;
5033 map_idx = bnapi->index;
5034 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5035 if (rc)
5036 goto err_out;
5037 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5038 ring->fw_ring_id);
5039 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5040 }
5041 ring = &txr->tx_ring_struct;
5042 map_idx = i;
Michael Chan697197e2018-10-14 07:02:46 -04005043 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
Michael Chanedd0c2c2015-12-27 18:19:19 -05005044 if (rc)
5045 goto err_out;
Michael Chan697197e2018-10-14 07:02:46 -04005046 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04005047 }
5048
Michael Chan697197e2018-10-14 07:02:46 -04005049 type = HWRM_RING_ALLOC_RX;
Michael Chanedd0c2c2015-12-27 18:19:19 -05005050 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005051 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05005052 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
Michael Chan3e08b182018-10-14 07:02:52 -04005053 struct bnxt_napi *bnapi = rxr->bnapi;
5054 u32 map_idx = bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04005055
Michael Chan697197e2018-10-14 07:02:46 -04005056 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
Michael Chanedd0c2c2015-12-27 18:19:19 -05005057 if (rc)
5058 goto err_out;
Michael Chan697197e2018-10-14 07:02:46 -04005059 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5060 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
Michael Chanb81a90d2016-01-02 23:45:01 -05005061 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
Michael Chan3e08b182018-10-14 07:02:52 -04005062 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5063 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5064 u32 type2 = HWRM_RING_ALLOC_CMPL;
5065 struct bnxt_cp_ring_info *cpr2;
5066
5067 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5068 ring = &cpr2->cp_ring_struct;
5069 ring->handle = BNXT_RX_HDL;
5070 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5071 if (rc)
5072 goto err_out;
5073 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5074 ring->fw_ring_id);
5075 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5076 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005077 }
5078
5079 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
Michael Chan697197e2018-10-14 07:02:46 -04005080 type = HWRM_RING_ALLOC_AGG;
Michael Chanc0c050c2015-10-22 16:01:17 -04005081 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005082 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005083 struct bnxt_ring_struct *ring =
5084 &rxr->rx_agg_ring_struct;
Michael Chan9899bb52018-03-31 13:54:16 -04005085 u32 grp_idx = ring->grp_idx;
Michael Chanb81a90d2016-01-02 23:45:01 -05005086 u32 map_idx = grp_idx + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04005087
Michael Chan697197e2018-10-14 07:02:46 -04005088 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
Michael Chanc0c050c2015-10-22 16:01:17 -04005089 if (rc)
5090 goto err_out;
5091
Michael Chan697197e2018-10-14 07:02:46 -04005092 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5093 ring->fw_ring_id);
5094 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
Michael Chanb81a90d2016-01-02 23:45:01 -05005095 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04005096 }
5097 }
5098err_out:
5099 return rc;
5100}
5101
5102static int hwrm_ring_free_send_msg(struct bnxt *bp,
5103 struct bnxt_ring_struct *ring,
5104 u32 ring_type, int cmpl_ring_id)
5105{
5106 int rc;
5107 struct hwrm_ring_free_input req = {0};
5108 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5109 u16 error_code;
5110
Prashant Sreedharan74608fc2016-01-28 03:11:20 -05005111 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
Michael Chanc0c050c2015-10-22 16:01:17 -04005112 req.ring_type = ring_type;
5113 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5114
5115 mutex_lock(&bp->hwrm_cmd_lock);
5116 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5117 error_code = le16_to_cpu(resp->error_code);
5118 mutex_unlock(&bp->hwrm_cmd_lock);
5119
5120 if (rc || error_code) {
Michael Chan2727c882018-04-26 17:44:35 -04005121 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5122 ring_type, rc, error_code);
5123 return -EIO;
Michael Chanc0c050c2015-10-22 16:01:17 -04005124 }
5125 return 0;
5126}
5127
Michael Chanedd0c2c2015-12-27 18:19:19 -05005128static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
Michael Chanc0c050c2015-10-22 16:01:17 -04005129{
Michael Chan23aefdd2018-10-14 07:02:51 -04005130 u32 type;
Michael Chanedd0c2c2015-12-27 18:19:19 -05005131 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04005132
5133 if (!bp->bnapi)
Michael Chanedd0c2c2015-12-27 18:19:19 -05005134 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04005135
Michael Chanedd0c2c2015-12-27 18:19:19 -05005136 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005137 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05005138 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
Michael Chanc0c050c2015-10-22 16:01:17 -04005139
Michael Chanedd0c2c2015-12-27 18:19:19 -05005140 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
Michael Chan1f833912019-04-25 22:31:52 -04005141 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5142
Michael Chanedd0c2c2015-12-27 18:19:19 -05005143 hwrm_ring_free_send_msg(bp, ring,
5144 RING_FREE_REQ_RING_TYPE_TX,
5145 close_path ? cmpl_ring_id :
5146 INVALID_HW_RING_ID);
5147 ring->fw_ring_id = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04005148 }
5149 }
5150
Michael Chanedd0c2c2015-12-27 18:19:19 -05005151 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005152 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05005153 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05005154 u32 grp_idx = rxr->bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04005155
Michael Chanedd0c2c2015-12-27 18:19:19 -05005156 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
Michael Chan1f833912019-04-25 22:31:52 -04005157 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5158
Michael Chanedd0c2c2015-12-27 18:19:19 -05005159 hwrm_ring_free_send_msg(bp, ring,
5160 RING_FREE_REQ_RING_TYPE_RX,
5161 close_path ? cmpl_ring_id :
5162 INVALID_HW_RING_ID);
5163 ring->fw_ring_id = INVALID_HW_RING_ID;
Michael Chanb81a90d2016-01-02 23:45:01 -05005164 bp->grp_info[grp_idx].rx_fw_ring_id =
5165 INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04005166 }
5167 }
5168
Michael Chan23aefdd2018-10-14 07:02:51 -04005169 if (bp->flags & BNXT_FLAG_CHIP_P5)
5170 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5171 else
5172 type = RING_FREE_REQ_RING_TYPE_RX;
Michael Chanedd0c2c2015-12-27 18:19:19 -05005173 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005174 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05005175 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05005176 u32 grp_idx = rxr->bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04005177
Michael Chanedd0c2c2015-12-27 18:19:19 -05005178 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
Michael Chan1f833912019-04-25 22:31:52 -04005179 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5180
Michael Chan23aefdd2018-10-14 07:02:51 -04005181 hwrm_ring_free_send_msg(bp, ring, type,
Michael Chanedd0c2c2015-12-27 18:19:19 -05005182 close_path ? cmpl_ring_id :
5183 INVALID_HW_RING_ID);
5184 ring->fw_ring_id = INVALID_HW_RING_ID;
Michael Chanb81a90d2016-01-02 23:45:01 -05005185 bp->grp_info[grp_idx].agg_fw_ring_id =
5186 INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04005187 }
5188 }
5189
Michael Chan9d8bc092016-12-29 12:13:33 -05005190 /* The completion rings are about to be freed. After that the
5191 * IRQ doorbell will not work anymore. So we need to disable
5192 * IRQ here.
5193 */
5194 bnxt_disable_int_sync(bp);
5195
Michael Chan23aefdd2018-10-14 07:02:51 -04005196 if (bp->flags & BNXT_FLAG_CHIP_P5)
5197 type = RING_FREE_REQ_RING_TYPE_NQ;
5198 else
5199 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
Michael Chanedd0c2c2015-12-27 18:19:19 -05005200 for (i = 0; i < bp->cp_nr_rings; i++) {
5201 struct bnxt_napi *bnapi = bp->bnapi[i];
5202 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
Michael Chan3e08b182018-10-14 07:02:52 -04005203 struct bnxt_ring_struct *ring;
5204 int j;
Michael Chanc0c050c2015-10-22 16:01:17 -04005205
Michael Chan3e08b182018-10-14 07:02:52 -04005206 for (j = 0; j < 2; j++) {
5207 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5208
5209 if (cpr2) {
5210 ring = &cpr2->cp_ring_struct;
5211 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5212 continue;
5213 hwrm_ring_free_send_msg(bp, ring,
5214 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5215 INVALID_HW_RING_ID);
5216 ring->fw_ring_id = INVALID_HW_RING_ID;
5217 }
5218 }
5219 ring = &cpr->cp_ring_struct;
Michael Chanedd0c2c2015-12-27 18:19:19 -05005220 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
Michael Chan23aefdd2018-10-14 07:02:51 -04005221 hwrm_ring_free_send_msg(bp, ring, type,
Michael Chanedd0c2c2015-12-27 18:19:19 -05005222 INVALID_HW_RING_ID);
5223 ring->fw_ring_id = INVALID_HW_RING_ID;
5224 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04005225 }
5226 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005227}
5228
Michael Chan41e8d792018-10-14 07:02:48 -04005229static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5230 bool shared);
5231
Michael Chan674f50a2018-01-17 03:21:09 -05005232static int bnxt_hwrm_get_rings(struct bnxt *bp)
5233{
5234 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5235 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5236 struct hwrm_func_qcfg_input req = {0};
5237 int rc;
5238
5239 if (bp->hwrm_spec_code < 0x10601)
5240 return 0;
5241
5242 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5243 req.fid = cpu_to_le16(0xffff);
5244 mutex_lock(&bp->hwrm_cmd_lock);
5245 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5246 if (rc) {
5247 mutex_unlock(&bp->hwrm_cmd_lock);
5248 return -EIO;
5249 }
5250
5251 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
Michael Chanf1ca94d2018-08-05 16:51:53 -04005252 if (BNXT_NEW_RM(bp)) {
Michael Chan674f50a2018-01-17 03:21:09 -05005253 u16 cp, stats;
5254
5255 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5256 hw_resc->resv_hw_ring_grps =
5257 le32_to_cpu(resp->alloc_hw_ring_grps);
5258 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5259 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5260 stats = le16_to_cpu(resp->alloc_stat_ctx);
Michael Chan75720e62018-12-09 07:01:00 -05005261 hw_resc->resv_irqs = cp;
Michael Chan41e8d792018-10-14 07:02:48 -04005262 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5263 int rx = hw_resc->resv_rx_rings;
5264 int tx = hw_resc->resv_tx_rings;
5265
5266 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5267 rx >>= 1;
5268 if (cp < (rx + tx)) {
5269 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5270 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5271 rx <<= 1;
5272 hw_resc->resv_rx_rings = rx;
5273 hw_resc->resv_tx_rings = tx;
5274 }
Michael Chan75720e62018-12-09 07:01:00 -05005275 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
Michael Chan41e8d792018-10-14 07:02:48 -04005276 hw_resc->resv_hw_ring_grps = rx;
5277 }
Michael Chan674f50a2018-01-17 03:21:09 -05005278 hw_resc->resv_cp_rings = cp;
Vasundhara Volam780baad2018-12-16 18:46:23 -05005279 hw_resc->resv_stat_ctxs = stats;
Michael Chan674f50a2018-01-17 03:21:09 -05005280 }
5281 mutex_unlock(&bp->hwrm_cmd_lock);
5282 return 0;
5283}
5284
Michael Chan391be5c2016-12-29 12:13:41 -05005285/* Caller must hold bp->hwrm_cmd_lock */
5286int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5287{
5288 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5289 struct hwrm_func_qcfg_input req = {0};
5290 int rc;
5291
5292 if (bp->hwrm_spec_code < 0x10601)
5293 return 0;
5294
5295 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5296 req.fid = cpu_to_le16(fid);
5297 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5298 if (!rc)
5299 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5300
5301 return rc;
5302}
5303
Michael Chan41e8d792018-10-14 07:02:48 -04005304static bool bnxt_rfs_supported(struct bnxt *bp);
5305
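/* Build a FUNC_CFG request reserving ring and context resources for the PF
 * (most fields are only filled in when the new resource manager is active).
 * On P5 chips completion rings are reserved per TX/RX ring (tx + ring_grps)
 * and RSS contexts in units of 64 ring groups; older chips reserve HW ring
 * groups and normally a single RSS context instead.
 */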
Michael Chan4ed50ef2018-03-09 23:46:03 -05005306static void
5307__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5308 int tx_rings, int rx_rings, int ring_grps,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005309 int cp_rings, int stats, int vnics)
Michael Chan391be5c2016-12-29 12:13:41 -05005310{
Michael Chan674f50a2018-01-17 03:21:09 -05005311 u32 enables = 0;
Michael Chan391be5c2016-12-29 12:13:41 -05005312
Michael Chan4ed50ef2018-03-09 23:46:03 -05005313 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5314 req->fid = cpu_to_le16(0xffff);
Michael Chan674f50a2018-01-17 03:21:09 -05005315 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
Michael Chan4ed50ef2018-03-09 23:46:03 -05005316 req->num_tx_rings = cpu_to_le16(tx_rings);
Michael Chanf1ca94d2018-08-05 16:51:53 -04005317 if (BNXT_NEW_RM(bp)) {
Michael Chan674f50a2018-01-17 03:21:09 -05005318 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
Michael Chan3f93cd32019-04-25 22:31:54 -04005319 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005320 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5321 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5322 enables |= tx_rings + ring_grps ?
Michael Chan3f93cd32019-04-25 22:31:54 -04005323 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005324 enables |= rx_rings ?
5325 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5326 } else {
5327 enables |= cp_rings ?
Michael Chan3f93cd32019-04-25 22:31:54 -04005328 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005329 enables |= ring_grps ?
5330 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5331 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5332 }
Michael Chandbe80d42018-10-05 00:26:00 -04005333 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
Michael Chan674f50a2018-01-17 03:21:09 -05005334
Michael Chan4ed50ef2018-03-09 23:46:03 -05005335 req->num_rx_rings = cpu_to_le16(rx_rings);
Michael Chan41e8d792018-10-14 07:02:48 -04005336 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5337 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5338 req->num_msix = cpu_to_le16(cp_rings);
5339 req->num_rsscos_ctxs =
5340 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5341 } else {
5342 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5343 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5344 req->num_rsscos_ctxs = cpu_to_le16(1);
5345 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5346 bnxt_rfs_supported(bp))
5347 req->num_rsscos_ctxs =
5348 cpu_to_le16(ring_grps + 1);
5349 }
Vasundhara Volam780baad2018-12-16 18:46:23 -05005350 req->num_stat_ctxs = cpu_to_le16(stats);
Michael Chan4ed50ef2018-03-09 23:46:03 -05005351 req->num_vnics = cpu_to_le16(vnics);
Michael Chan674f50a2018-01-17 03:21:09 -05005352 }
Michael Chan4ed50ef2018-03-09 23:46:03 -05005353 req->enables = cpu_to_le32(enables);
5354}
5355
5356static void
5357__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5358 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5359 int rx_rings, int ring_grps, int cp_rings,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005360 int stats, int vnics)
Michael Chan4ed50ef2018-03-09 23:46:03 -05005361{
5362 u32 enables = 0;
5363
5364 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5365 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005366 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5367 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
Michael Chan3f93cd32019-04-25 22:31:54 -04005368 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005369 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5370 enables |= tx_rings + ring_grps ?
Michael Chan3f93cd32019-04-25 22:31:54 -04005371 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005372 } else {
5373 enables |= cp_rings ?
Michael Chan3f93cd32019-04-25 22:31:54 -04005374 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005375 enables |= ring_grps ?
5376 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5377 }
Michael Chan4ed50ef2018-03-09 23:46:03 -05005378 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
Michael Chan41e8d792018-10-14 07:02:48 -04005379 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
Michael Chan4ed50ef2018-03-09 23:46:03 -05005380
Michael Chan41e8d792018-10-14 07:02:48 -04005381 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
Michael Chan4ed50ef2018-03-09 23:46:03 -05005382 req->num_tx_rings = cpu_to_le16(tx_rings);
5383 req->num_rx_rings = cpu_to_le16(rx_rings);
Michael Chan41e8d792018-10-14 07:02:48 -04005384 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5385 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5386 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5387 } else {
5388 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5389 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5390 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5391 }
Vasundhara Volam780baad2018-12-16 18:46:23 -05005392 req->num_stat_ctxs = cpu_to_le16(stats);
Michael Chan4ed50ef2018-03-09 23:46:03 -05005393 req->num_vnics = cpu_to_le16(vnics);
5394
5395 req->enables = cpu_to_le32(enables);
5396}
5397
5398static int
5399bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005400 int ring_grps, int cp_rings, int stats, int vnics)
Michael Chan4ed50ef2018-03-09 23:46:03 -05005401{
5402 struct hwrm_func_cfg_input req = {0};
5403 int rc;
5404
5405 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005406 cp_rings, stats, vnics);
Michael Chan4ed50ef2018-03-09 23:46:03 -05005407 if (!req.enables)
Michael Chan674f50a2018-01-17 03:21:09 -05005408 return 0;
5409
Michael Chan674f50a2018-01-17 03:21:09 -05005410 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5411 if (rc)
5412 return -ENOMEM;
5413
5414 if (bp->hwrm_spec_code < 0x10601)
5415 bp->hw_resc.resv_tx_rings = tx_rings;
5416
5417 rc = bnxt_hwrm_get_rings(bp);
5418 return rc;
5419}
5420
5421static int
5422bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005423 int ring_grps, int cp_rings, int stats, int vnics)
Michael Chan674f50a2018-01-17 03:21:09 -05005424{
5425 struct hwrm_func_vf_cfg_input req = {0};
Michael Chan674f50a2018-01-17 03:21:09 -05005426 int rc;
5427
Michael Chanf1ca94d2018-08-05 16:51:53 -04005428 if (!BNXT_NEW_RM(bp)) {
Michael Chan674f50a2018-01-17 03:21:09 -05005429 bp->hw_resc.resv_tx_rings = tx_rings;
5430 return 0;
5431 }
5432
Michael Chan4ed50ef2018-03-09 23:46:03 -05005433 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005434 cp_rings, stats, vnics);
Michael Chan674f50a2018-01-17 03:21:09 -05005435 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5436 if (rc)
5437 return -ENOMEM;
5438
5439 rc = bnxt_hwrm_get_rings(bp);
5440 return rc;
5441}
5442
5443static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005444 int cp, int stat, int vnic)
Michael Chan674f50a2018-01-17 03:21:09 -05005445{
5446 if (BNXT_PF(bp))
Vasundhara Volam780baad2018-12-16 18:46:23 -05005447 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5448 vnic);
Michael Chan674f50a2018-01-17 03:21:09 -05005449 else
Vasundhara Volam780baad2018-12-16 18:46:23 -05005450 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5451 vnic);
Michael Chan674f50a2018-01-17 03:21:09 -05005452}
5453
Michael Chanb16b6892018-12-16 18:46:25 -05005454int bnxt_nq_rings_in_use(struct bnxt *bp)
Michael Chan08654eb2018-03-31 13:54:17 -04005455{
5456 int cp = bp->cp_nr_rings;
5457 int ulp_msix, ulp_base;
5458
5459 ulp_msix = bnxt_get_ulp_msix_num(bp);
5460 if (ulp_msix) {
5461 ulp_base = bnxt_get_ulp_msix_base(bp);
5462 cp += ulp_msix;
5463 if ((ulp_base + ulp_msix) > cp)
5464 cp = ulp_base + ulp_msix;
5465 }
5466 return cp;
5467}
5468
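/* On P5 chips the NQs (one per MSI-X vector) and the L2 completion rings are
 * separate resources: completion rings in use equal tx + rx rings, while NQs
 * follow the vector count computed above. On older chips the two counts are
 * the same.
 */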
Michael Chanc0b8cda2018-12-09 07:01:01 -05005469static int bnxt_cp_rings_in_use(struct bnxt *bp)
5470{
5471 int cp;
5472
5473 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5474 return bnxt_nq_rings_in_use(bp);
5475
5476 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5477 return cp;
5478}
5479
Vasundhara Volam780baad2018-12-16 18:46:23 -05005480static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5481{
5482 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
5483}
5484
Michael Chan4e41dc52018-03-31 13:54:19 -04005485static bool bnxt_need_reserve_rings(struct bnxt *bp)
5486{
5487 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
Michael Chanfbcfc8e2018-03-31 13:54:20 -04005488 int cp = bnxt_cp_rings_in_use(bp);
Michael Chanc0b8cda2018-12-09 07:01:01 -05005489 int nq = bnxt_nq_rings_in_use(bp);
Vasundhara Volam780baad2018-12-16 18:46:23 -05005490 int rx = bp->rx_nr_rings, stat;
Michael Chan4e41dc52018-03-31 13:54:19 -04005491 int vnic = 1, grp = rx;
5492
5493 if (bp->hwrm_spec_code < 0x10601)
5494 return false;
5495
5496 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5497 return true;
5498
Michael Chan41e8d792018-10-14 07:02:48 -04005499 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
Michael Chan4e41dc52018-03-31 13:54:19 -04005500 vnic = rx + 1;
5501 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5502 rx <<= 1;
Vasundhara Volam780baad2018-12-16 18:46:23 -05005503 stat = bnxt_get_func_stat_ctxs(bp);
Michael Chanf1ca94d2018-08-05 16:51:53 -04005504 if (BNXT_NEW_RM(bp) &&
Michael Chan4e41dc52018-03-31 13:54:19 -04005505 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
Michael Chanc0b8cda2018-12-09 07:01:01 -05005506 hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
Vasundhara Volam780baad2018-12-16 18:46:23 -05005507 hw_resc->resv_stat_ctxs != stat ||
Michael Chan41e8d792018-10-14 07:02:48 -04005508 (hw_resc->resv_hw_ring_grps != grp &&
5509 !(bp->flags & BNXT_FLAG_CHIP_P5))))
Michael Chan4e41dc52018-03-31 13:54:19 -04005510 return true;
5511 return false;
5512}
5513
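/* Reserve rings with the firmware and then trim the driver's ring counts to
 * what was actually granted. If the granted RX rings cannot cover the 2x
 * aggregation rings, aggregation (and LRO) is dropped instead of failing,
 * unless the interface is already up, in which case -ENOMEM is returned.
 */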
Michael Chan674f50a2018-01-17 03:21:09 -05005514static int __bnxt_reserve_rings(struct bnxt *bp)
5515{
5516 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
Michael Chanc0b8cda2018-12-09 07:01:01 -05005517 int cp = bnxt_nq_rings_in_use(bp);
Michael Chan674f50a2018-01-17 03:21:09 -05005518 int tx = bp->tx_nr_rings;
5519 int rx = bp->rx_nr_rings;
Michael Chan674f50a2018-01-17 03:21:09 -05005520 int grp, rx_rings, rc;
Vasundhara Volam780baad2018-12-16 18:46:23 -05005521 int vnic = 1, stat;
Michael Chan674f50a2018-01-17 03:21:09 -05005522 bool sh = false;
Michael Chan674f50a2018-01-17 03:21:09 -05005523
Michael Chan4e41dc52018-03-31 13:54:19 -04005524 if (!bnxt_need_reserve_rings(bp))
Michael Chan391be5c2016-12-29 12:13:41 -05005525 return 0;
5526
Michael Chan674f50a2018-01-17 03:21:09 -05005527 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5528 sh = true;
Michael Chan41e8d792018-10-14 07:02:48 -04005529 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
Michael Chan674f50a2018-01-17 03:21:09 -05005530 vnic = rx + 1;
5531 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5532 rx <<= 1;
Michael Chan674f50a2018-01-17 03:21:09 -05005533 grp = bp->rx_nr_rings;
Vasundhara Volam780baad2018-12-16 18:46:23 -05005534 stat = bnxt_get_func_stat_ctxs(bp);
Michael Chan391be5c2016-12-29 12:13:41 -05005535
Vasundhara Volam780baad2018-12-16 18:46:23 -05005536 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
Michael Chan391be5c2016-12-29 12:13:41 -05005537 if (rc)
5538 return rc;
5539
Michael Chan674f50a2018-01-17 03:21:09 -05005540 tx = hw_resc->resv_tx_rings;
Michael Chanf1ca94d2018-08-05 16:51:53 -04005541 if (BNXT_NEW_RM(bp)) {
Michael Chan674f50a2018-01-17 03:21:09 -05005542 rx = hw_resc->resv_rx_rings;
Michael Chanc0b8cda2018-12-09 07:01:01 -05005543 cp = hw_resc->resv_irqs;
Michael Chan674f50a2018-01-17 03:21:09 -05005544 grp = hw_resc->resv_hw_ring_grps;
5545 vnic = hw_resc->resv_vnics;
Vasundhara Volam780baad2018-12-16 18:46:23 -05005546 stat = hw_resc->resv_stat_ctxs;
Michael Chan674f50a2018-01-17 03:21:09 -05005547 }
5548
5549 rx_rings = rx;
5550 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5551 if (rx >= 2) {
5552 rx_rings = rx >> 1;
5553 } else {
5554 if (netif_running(bp->dev))
5555 return -ENOMEM;
5556
5557 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5558 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5559 bp->dev->hw_features &= ~NETIF_F_LRO;
5560 bp->dev->features &= ~NETIF_F_LRO;
5561 bnxt_set_ring_params(bp);
5562 }
5563 }
5564 rx_rings = min_t(int, rx_rings, grp);
Vasundhara Volam780baad2018-12-16 18:46:23 -05005565 cp = min_t(int, cp, bp->cp_nr_rings);
5566 if (stat > bnxt_get_ulp_stat_ctxs(bp))
5567 stat -= bnxt_get_ulp_stat_ctxs(bp);
5568 cp = min_t(int, cp, stat);
Michael Chan674f50a2018-01-17 03:21:09 -05005569 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5570 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5571 rx = rx_rings << 1;
5572 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5573 bp->tx_nr_rings = tx;
5574 bp->rx_nr_rings = rx_rings;
5575 bp->cp_nr_rings = cp;
5576
Vasundhara Volam780baad2018-12-16 18:46:23 -05005577 if (!tx || !rx || !cp || !grp || !vnic || !stat)
Michael Chan674f50a2018-01-17 03:21:09 -05005578 return -ENOMEM;
5579
Michael Chan391be5c2016-12-29 12:13:41 -05005580 return rc;
5581}
5582
Michael Chan8f23d632018-01-17 03:21:12 -05005583static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005584 int ring_grps, int cp_rings, int stats,
5585 int vnics)
Michael Chan98fdbe72017-08-28 13:40:26 -04005586{
Michael Chan8f23d632018-01-17 03:21:12 -05005587 struct hwrm_func_vf_cfg_input req = {0};
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05005588 u32 flags;
Michael Chan98fdbe72017-08-28 13:40:26 -04005589 int rc;
5590
Michael Chanf1ca94d2018-08-05 16:51:53 -04005591 if (!BNXT_NEW_RM(bp))
Michael Chan98fdbe72017-08-28 13:40:26 -04005592 return 0;
5593
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05005594 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005595 cp_rings, stats, vnics);
Michael Chan8f23d632018-01-17 03:21:12 -05005596 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5597 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5598 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
Michael Chan8f23d632018-01-17 03:21:12 -05005599 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
Michael Chan41e8d792018-10-14 07:02:48 -04005600 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5601 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5602 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5603 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
Michael Chan98fdbe72017-08-28 13:40:26 -04005604
Michael Chan8f23d632018-01-17 03:21:12 -05005605 req.flags = cpu_to_le32(flags);
Michael Chan98fdbe72017-08-28 13:40:26 -04005606 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5607 if (rc)
5608 return -ENOMEM;
5609 return 0;
5610}
5611
Michael Chan8f23d632018-01-17 03:21:12 -05005612static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005613 int ring_grps, int cp_rings, int stats,
5614 int vnics)
Michael Chan8f23d632018-01-17 03:21:12 -05005615{
5616 struct hwrm_func_cfg_input req = {0};
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05005617 u32 flags;
Michael Chan8f23d632018-01-17 03:21:12 -05005618 int rc;
5619
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05005620 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005621 cp_rings, stats, vnics);
Michael Chan8f23d632018-01-17 03:21:12 -05005622 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
Michael Chan41e8d792018-10-14 07:02:48 -04005623 if (BNXT_NEW_RM(bp)) {
Michael Chan8f23d632018-01-17 03:21:12 -05005624 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5625 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
Michael Chan8f23d632018-01-17 03:21:12 -05005626 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5627 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
Michael Chan41e8d792018-10-14 07:02:48 -04005628 if (bp->flags & BNXT_FLAG_CHIP_P5)
Michael Chan0b815022019-01-12 00:13:04 -05005629 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5630 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
Michael Chan41e8d792018-10-14 07:02:48 -04005631 else
5632 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5633 }
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05005634
Michael Chan8f23d632018-01-17 03:21:12 -05005635 req.flags = cpu_to_le32(flags);
Michael Chan8f23d632018-01-17 03:21:12 -05005636 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5637 if (rc)
5638 return -ENOMEM;
5639 return 0;
5640}
5641
5642static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005643 int ring_grps, int cp_rings, int stats,
5644 int vnics)
Michael Chan8f23d632018-01-17 03:21:12 -05005645{
5646 if (bp->hwrm_spec_code < 0x10801)
5647 return 0;
5648
5649 if (BNXT_PF(bp))
5650 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005651 ring_grps, cp_rings, stats,
5652 vnics);
Michael Chan8f23d632018-01-17 03:21:12 -05005653
5654 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
Vasundhara Volam780baad2018-12-16 18:46:23 -05005655 cp_rings, stats, vnics);
Michael Chan8f23d632018-01-17 03:21:12 -05005656}
5657
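/* Query HWRM_RING_AGGINT_QCAPS for the supported interrupt coalescing
 * parameters and their limits.  Older firmware (< 1.9.2) does not support
 * the query, so legacy defaults are filled in first and used as a fallback.
 */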
Michael Chan74706af2018-10-14 07:02:40 -04005658static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5659{
5660 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5661 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5662 struct hwrm_ring_aggint_qcaps_input req = {0};
5663 int rc;
5664
5665 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5666 coal_cap->num_cmpl_dma_aggr_max = 63;
5667 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5668 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5669 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5670 coal_cap->int_lat_tmr_min_max = 65535;
5671 coal_cap->int_lat_tmr_max_max = 65535;
5672 coal_cap->num_cmpl_aggr_int_max = 65535;
5673 coal_cap->timer_units = 80;
5674
5675 if (bp->hwrm_spec_code < 0x10902)
5676 return;
5677
5678 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5679 mutex_lock(&bp->hwrm_cmd_lock);
5680 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5681 if (!rc) {
5682 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
Michael Chan58590c82018-10-14 07:02:56 -04005683 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
Michael Chan74706af2018-10-14 07:02:40 -04005684 coal_cap->num_cmpl_dma_aggr_max =
5685 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5686 coal_cap->num_cmpl_dma_aggr_during_int_max =
5687 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5688 coal_cap->cmpl_aggr_dma_tmr_max =
5689 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5690 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5691 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5692 coal_cap->int_lat_tmr_min_max =
5693 le16_to_cpu(resp->int_lat_tmr_min_max);
5694 coal_cap->int_lat_tmr_max_max =
5695 le16_to_cpu(resp->int_lat_tmr_max_max);
5696 coal_cap->num_cmpl_aggr_int_max =
5697 le16_to_cpu(resp->num_cmpl_aggr_int_max);
5698 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5699 }
5700 mutex_unlock(&bp->hwrm_cmd_lock);
5701}
5702
5703static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5704{
5705 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5706
5707 return usec * 1000 / coal_cap->timer_units;
5708}
5709
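/* Fill a RING_CMPL_RING_CFG_AGGINT_PARAMS request from the given bnxt_coal
 * settings, clamping each value to the limits reported by
 * bnxt_hwrm_coal_params_qcaps().
 */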
5710static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5711 struct bnxt_coal *hw_coal,
Michael Chanbb053f52016-02-26 04:00:02 -05005712 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5713{
Michael Chan74706af2018-10-14 07:02:40 -04005714 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5715 u32 cmpl_params = coal_cap->cmpl_params;
5716 u16 val, tmr, max, flags = 0;
Michael Chanf8503962017-10-26 11:51:28 -04005717
5718 max = hw_coal->bufs_per_record * 128;
5719 if (hw_coal->budget)
5720 max = hw_coal->bufs_per_record * hw_coal->budget;
Michael Chan74706af2018-10-14 07:02:40 -04005721 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
Michael Chanf8503962017-10-26 11:51:28 -04005722
5723 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
5724 req->num_cmpl_aggr_int = cpu_to_le16(val);
Michael Chanb153cbc2017-11-03 03:32:39 -04005725
Michael Chan74706af2018-10-14 07:02:40 -04005726 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
Michael Chanf8503962017-10-26 11:51:28 -04005727 req->num_cmpl_dma_aggr = cpu_to_le16(val);
5728
Michael Chan74706af2018-10-14 07:02:40 -04005729 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
5730 coal_cap->num_cmpl_dma_aggr_during_int_max);
Michael Chanf8503962017-10-26 11:51:28 -04005731 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
5732
Michael Chan74706af2018-10-14 07:02:40 -04005733 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
5734 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
Michael Chanf8503962017-10-26 11:51:28 -04005735 req->int_lat_tmr_max = cpu_to_le16(tmr);
5736
5737 /* min timer set to 1/2 of interrupt timer */
Michael Chan74706af2018-10-14 07:02:40 -04005738 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
5739 val = tmr / 2;
5740 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
5741 req->int_lat_tmr_min = cpu_to_le16(val);
5742 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5743 }
Michael Chanf8503962017-10-26 11:51:28 -04005744
5745 /* buf timer set to 1/4 of interrupt timer */
Michael Chan74706af2018-10-14 07:02:40 -04005746 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
Michael Chanf8503962017-10-26 11:51:28 -04005747 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
5748
Michael Chan74706af2018-10-14 07:02:40 -04005749 if (cmpl_params &
5750 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
5751 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
5752 val = clamp_t(u16, tmr, 1,
5753 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
5754		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
5755 req->enables |=
5756 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
5757 }
Michael Chanf8503962017-10-26 11:51:28 -04005758
Michael Chan74706af2018-10-14 07:02:40 -04005759 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
5760 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
5761 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
5762 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
Michael Chanf8503962017-10-26 11:51:28 -04005763 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
Michael Chanbb053f52016-02-26 04:00:02 -05005764 req->flags = cpu_to_le16(flags);
Michael Chan74706af2018-10-14 07:02:40 -04005765 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
Michael Chanbb053f52016-02-26 04:00:02 -05005766}
5767
Michael Chan58590c82018-10-14 07:02:56 -04005768/* Caller holds bp->hwrm_cmd_lock */
5769static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
5770 struct bnxt_coal *hw_coal)
5771{
5772 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5773 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5774 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5775 u32 nq_params = coal_cap->nq_params;
5776 u16 tmr;
5777
5778 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
5779 return 0;
5780
5781 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5782 -1, -1);
5783 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
5784 req.flags =
5785 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
5786
5787 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
5788 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
5789 req.int_lat_tmr_min = cpu_to_le16(tmr);
5790 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5791 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5792}
5793
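/* Program interrupt coalescing for a single RX completion ring from the
 * per-ring values in cpr->rx_ring_coal; unlike bnxt_hwrm_set_coal() this
 * touches only that ring.
 */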
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005794int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
5795{
5796 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
5797 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5798 struct bnxt_coal coal;
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005799
5800 /* Tick values in micro seconds.
5801 * 1 coal_buf x bufs_per_record = 1 completion record.
5802 */
5803 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
5804
5805 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
5806 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
5807
5808 if (!bnapi->rx_ring)
5809 return -ENODEV;
5810
5811 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5812 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5813
Michael Chan74706af2018-10-14 07:02:40 -04005814 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005815
Michael Chan2c61d212018-10-14 07:02:50 -04005816 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005817
5818 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5819 HWRM_CMD_TIMEOUT);
5820}
5821
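/* Program RX and TX interrupt coalescing for every completion ring.  On
 * P5 chips, an NQ can service both an RX and a TX completion ring, so the
 * TX ring gets its own request and the NQ coalescing is updated as well.
 */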
Michael Chanc0c050c2015-10-22 16:01:17 -04005822int bnxt_hwrm_set_coal(struct bnxt *bp)
5823{
5824 int i, rc = 0;
Michael Chandfc9c942016-02-26 04:00:03 -05005825 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5826 req_tx = {0}, *req;
Michael Chanc0c050c2015-10-22 16:01:17 -04005827
Michael Chandfc9c942016-02-26 04:00:03 -05005828 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5829 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5830 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5831 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
Michael Chanc0c050c2015-10-22 16:01:17 -04005832
Michael Chan74706af2018-10-14 07:02:40 -04005833 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
5834 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
Michael Chanc0c050c2015-10-22 16:01:17 -04005835
5836 mutex_lock(&bp->hwrm_cmd_lock);
5837 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chandfc9c942016-02-26 04:00:03 -05005838 struct bnxt_napi *bnapi = bp->bnapi[i];
Michael Chan58590c82018-10-14 07:02:56 -04005839 struct bnxt_coal *hw_coal;
Michael Chan2c61d212018-10-14 07:02:50 -04005840 u16 ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04005841
Michael Chandfc9c942016-02-26 04:00:03 -05005842 req = &req_rx;
Michael Chan2c61d212018-10-14 07:02:50 -04005843 if (!bnapi->rx_ring) {
5844 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
Michael Chandfc9c942016-02-26 04:00:03 -05005845 req = &req_tx;
Michael Chan2c61d212018-10-14 07:02:50 -04005846 } else {
5847 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
5848 }
5849 req->ring_id = cpu_to_le16(ring_id);
Michael Chandfc9c942016-02-26 04:00:03 -05005850
5851 rc = _hwrm_send_message(bp, req, sizeof(*req),
Michael Chanc0c050c2015-10-22 16:01:17 -04005852 HWRM_CMD_TIMEOUT);
5853 if (rc)
5854 break;
Michael Chan58590c82018-10-14 07:02:56 -04005855
5856 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5857 continue;
5858
5859 if (bnapi->rx_ring && bnapi->tx_ring) {
5860 req = &req_tx;
5861 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5862 req->ring_id = cpu_to_le16(ring_id);
5863 rc = _hwrm_send_message(bp, req, sizeof(*req),
5864 HWRM_CMD_TIMEOUT);
5865 if (rc)
5866 break;
5867 }
5868 if (bnapi->rx_ring)
5869 hw_coal = &bp->rx_coal;
5870 else
5871 hw_coal = &bp->tx_coal;
5872 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
Michael Chanc0c050c2015-10-22 16:01:17 -04005873 }
5874 mutex_unlock(&bp->hwrm_cmd_lock);
5875 return rc;
5876}
5877
5878static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5879{
5880 int rc = 0, i;
5881 struct hwrm_stat_ctx_free_input req = {0};
5882
5883 if (!bp->bnapi)
5884 return 0;
5885
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04005886 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5887 return 0;
5888
Michael Chanc0c050c2015-10-22 16:01:17 -04005889 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5890
5891 mutex_lock(&bp->hwrm_cmd_lock);
5892 for (i = 0; i < bp->cp_nr_rings; i++) {
5893 struct bnxt_napi *bnapi = bp->bnapi[i];
5894 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5895
5896 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5897 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5898
5899 rc = _hwrm_send_message(bp, &req, sizeof(req),
5900 HWRM_CMD_TIMEOUT);
5901 if (rc)
5902 break;
5903
5904 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5905 }
5906 }
5907 mutex_unlock(&bp->hwrm_cmd_lock);
5908 return rc;
5909}
5910
5911static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5912{
5913 int rc = 0, i;
5914 struct hwrm_stat_ctx_alloc_input req = {0};
5915 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5916
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04005917 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5918 return 0;
5919
Michael Chanc0c050c2015-10-22 16:01:17 -04005920 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5921
Michael Chan51f30782016-07-01 18:46:29 -04005922 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
Michael Chanc0c050c2015-10-22 16:01:17 -04005923
5924 mutex_lock(&bp->hwrm_cmd_lock);
5925 for (i = 0; i < bp->cp_nr_rings; i++) {
5926 struct bnxt_napi *bnapi = bp->bnapi[i];
5927 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5928
5929 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5930
5931 rc = _hwrm_send_message(bp, &req, sizeof(req),
5932 HWRM_CMD_TIMEOUT);
5933 if (rc)
5934 break;
5935
5936 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5937
5938 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5939 }
5940 mutex_unlock(&bp->hwrm_cmd_lock);
Pan Bian89aa8442016-12-03 17:56:17 +08005941 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04005942}
5943
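/* Query the current function configuration: default VLAN for VFs,
 * firmware LLDP/DCBX agent capabilities, multi-host mode, NPAR partition
 * type, bridge (EVB) mode and the maximum configured MTU.
 */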
Michael Chancf6645f2016-06-13 02:25:28 -04005944static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5945{
5946 struct hwrm_func_qcfg_input req = {0};
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04005947 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan9315edc2017-07-24 12:34:25 -04005948 u16 flags;
Michael Chancf6645f2016-06-13 02:25:28 -04005949 int rc;
5950
5951 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5952 req.fid = cpu_to_le16(0xffff);
5953 mutex_lock(&bp->hwrm_cmd_lock);
5954 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5955 if (rc)
5956 goto func_qcfg_exit;
5957
5958#ifdef CONFIG_BNXT_SRIOV
5959 if (BNXT_VF(bp)) {
Michael Chancf6645f2016-06-13 02:25:28 -04005960 struct bnxt_vf_info *vf = &bp->vf;
5961
5962 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5963 }
5964#endif
Michael Chan9315edc2017-07-24 12:34:25 -04005965 flags = le16_to_cpu(resp->flags);
5966 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5967 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
Michael Chan97381a12018-08-05 16:51:54 -04005968 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
Michael Chan9315edc2017-07-24 12:34:25 -04005969 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
Michael Chan97381a12018-08-05 16:51:54 -04005970 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
Deepak Khungar9e54e322017-04-21 20:11:26 -04005971 }
Michael Chan9315edc2017-07-24 12:34:25 -04005972 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5973 bp->flags |= BNXT_FLAG_MULTI_HOST;
Michael Chanbc39f882017-03-08 18:44:34 -05005974
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04005975 switch (resp->port_partition_type) {
5976 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5977 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5978 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5979 bp->port_partition_type = resp->port_partition_type;
5980 break;
5981 }
Michael Chan32e8239c2017-07-24 12:34:21 -04005982 if (bp->hwrm_spec_code < 0x10707 ||
5983 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5984 bp->br_mode = BRIDGE_MODE_VEB;
5985 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5986 bp->br_mode = BRIDGE_MODE_VEPA;
5987 else
5988 bp->br_mode = BRIDGE_MODE_UNDEF;
Michael Chancf6645f2016-06-13 02:25:28 -04005989
Michael Chan7eb9bb32017-10-26 11:51:25 -04005990 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5991 if (!bp->max_mtu)
5992 bp->max_mtu = BNXT_MAX_MTU;
5993
Michael Chancf6645f2016-06-13 02:25:28 -04005994func_qcfg_exit:
5995 mutex_unlock(&bp->hwrm_cmd_lock);
5996 return rc;
5997}
5998
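/* Query how much context (backing store) memory the chip needs the host
 * to provide, and cache the per-type entry sizes and limits in bp->ctx.
 * Only supported on firmware 1.9.2 or newer and not on VFs.
 */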
Michael Chan98f04cf2018-10-14 07:02:43 -04005999static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6000{
6001 struct hwrm_func_backing_store_qcaps_input req = {0};
6002 struct hwrm_func_backing_store_qcaps_output *resp =
6003 bp->hwrm_cmd_resp_addr;
6004 int rc;
6005
6006 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6007 return 0;
6008
6009 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6010 mutex_lock(&bp->hwrm_cmd_lock);
6011 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6012 if (!rc) {
6013 struct bnxt_ctx_pg_info *ctx_pg;
6014 struct bnxt_ctx_mem_info *ctx;
6015 int i;
6016
6017 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6018 if (!ctx) {
6019 rc = -ENOMEM;
6020 goto ctx_err;
6021 }
6022 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6023 if (!ctx_pg) {
6024 kfree(ctx);
6025 rc = -ENOMEM;
6026 goto ctx_err;
6027 }
6028 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6029 ctx->tqm_mem[i] = ctx_pg;
6030
6031 bp->ctx = ctx;
6032 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6033 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6034 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6035 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6036 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6037 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6038 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6039 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6040 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6041 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6042 ctx->vnic_max_vnic_entries =
6043 le16_to_cpu(resp->vnic_max_vnic_entries);
6044 ctx->vnic_max_ring_table_entries =
6045 le16_to_cpu(resp->vnic_max_ring_table_entries);
6046 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6047 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6048 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6049 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6050 ctx->tqm_min_entries_per_ring =
6051 le32_to_cpu(resp->tqm_min_entries_per_ring);
6052 ctx->tqm_max_entries_per_ring =
6053 le32_to_cpu(resp->tqm_max_entries_per_ring);
6054 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6055 if (!ctx->tqm_entries_multiple)
6056 ctx->tqm_entries_multiple = 1;
6057 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6058 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6059 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6060 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6061 } else {
6062 rc = 0;
6063 }
6064ctx_err:
6065 mutex_unlock(&bp->hwrm_cmd_lock);
6066 return rc;
6067}
6068
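/* Encode the page size and indirection level of a context memory ring
 * into the request's pg_size_lvl attribute and page directory address.
 */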
Michael Chan1b9394e2018-10-14 07:02:44 -04006069static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6070 __le64 *pg_dir)
6071{
6072 u8 pg_size = 0;
6073
6074 if (BNXT_PAGE_SHIFT == 13)
6075 pg_size = 1 << 4;
6076	else if (BNXT_PAGE_SHIFT == 16)
6077 pg_size = 2 << 4;
6078
6079 *pg_attr = pg_size;
Michael Chan08fe9d12018-12-20 03:38:50 -05006080 if (rmem->depth >= 1) {
6081 if (rmem->depth == 2)
6082 *pg_attr |= 2;
6083 else
6084 *pg_attr |= 1;
Michael Chan1b9394e2018-10-14 07:02:44 -04006085 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6086 } else {
6087 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6088 }
6089}
6090
6091#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6092 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6093 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6094 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6095 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6096 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6097
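/* Pass the DMA addresses and sizes of the allocated context memory
 * regions (QP, SRQ, CQ, VNIC, stats, MRAV, TIM and the TQM rings) to the
 * firmware, as selected by the enables bitmap.
 */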
6098static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6099{
6100 struct hwrm_func_backing_store_cfg_input req = {0};
6101 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6102 struct bnxt_ctx_pg_info *ctx_pg;
6103 __le32 *num_entries;
6104 __le64 *pg_dir;
6105 u8 *pg_attr;
6106 int i, rc;
6107 u32 ena;
6108
6109 if (!ctx)
6110 return 0;
6111
6112 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6113 req.enables = cpu_to_le32(enables);
6114
6115 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6116 ctx_pg = &ctx->qp_mem;
6117 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6118 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6119 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6120 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6121 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6122 &req.qpc_pg_size_qpc_lvl,
6123 &req.qpc_page_dir);
6124 }
6125 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6126 ctx_pg = &ctx->srq_mem;
6127 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6128 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6129 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6130 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6131 &req.srq_pg_size_srq_lvl,
6132 &req.srq_page_dir);
6133 }
6134 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6135 ctx_pg = &ctx->cq_mem;
6136 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6137 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6138 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6139 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6140 &req.cq_page_dir);
6141 }
6142 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6143 ctx_pg = &ctx->vnic_mem;
6144 req.vnic_num_vnic_entries =
6145 cpu_to_le16(ctx->vnic_max_vnic_entries);
6146 req.vnic_num_ring_table_entries =
6147 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6148 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6149 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6150 &req.vnic_pg_size_vnic_lvl,
6151 &req.vnic_page_dir);
6152 }
6153 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6154 ctx_pg = &ctx->stat_mem;
6155 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6156 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6157 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6158 &req.stat_pg_size_stat_lvl,
6159 &req.stat_page_dir);
6160 }
Michael Chancf6daed2018-12-20 03:38:51 -05006161 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6162 ctx_pg = &ctx->mrav_mem;
6163 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6164 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6165 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6166 &req.mrav_pg_size_mrav_lvl,
6167 &req.mrav_page_dir);
6168 }
6169 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6170 ctx_pg = &ctx->tim_mem;
6171 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6172 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6173 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6174 &req.tim_pg_size_tim_lvl,
6175 &req.tim_page_dir);
6176 }
Michael Chan1b9394e2018-10-14 07:02:44 -04006177 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6178 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6179 pg_dir = &req.tqm_sp_page_dir,
6180 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6181 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6182 if (!(enables & ena))
6183 continue;
6184
6185 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6186 ctx_pg = ctx->tqm_mem[i];
6187 *num_entries = cpu_to_le32(ctx_pg->entries);
6188 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6189 }
6190 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6191 if (rc)
6192 rc = -EIO;
6193 return rc;
6194}
6195
Michael Chan98f04cf2018-10-14 07:02:43 -04006196static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
Michael Chan08fe9d12018-12-20 03:38:50 -05006197 struct bnxt_ctx_pg_info *ctx_pg)
Michael Chan98f04cf2018-10-14 07:02:43 -04006198{
6199 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6200
Michael Chan98f04cf2018-10-14 07:02:43 -04006201 rmem->page_size = BNXT_PAGE_SIZE;
6202 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6203 rmem->dma_arr = ctx_pg->ctx_dma_arr;
Michael Chan1b9394e2018-10-14 07:02:44 -04006204 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
Michael Chan08fe9d12018-12-20 03:38:50 -05006205 if (rmem->depth >= 1)
6206 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
Michael Chan98f04cf2018-10-14 07:02:43 -04006207 return bnxt_alloc_ring(bp, rmem);
6208}
6209
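/* Allocate the backing pages for one context memory region.  Regions
 * larger than one page directory (or requested with depth > 1) use a
 * two-level page table of MAX_CTX_PAGES-sized blocks.
 */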
Michael Chan08fe9d12018-12-20 03:38:50 -05006210static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6211 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6212 u8 depth)
6213{
6214 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6215 int rc;
6216
6217 if (!mem_size)
6218 return 0;
6219
6220 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6221 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6222 ctx_pg->nr_pages = 0;
6223 return -EINVAL;
6224 }
6225 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6226 int nr_tbls, i;
6227
6228 rmem->depth = 2;
6229 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6230 GFP_KERNEL);
6231 if (!ctx_pg->ctx_pg_tbl)
6232 return -ENOMEM;
6233 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6234 rmem->nr_pages = nr_tbls;
6235 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6236 if (rc)
6237 return rc;
6238 for (i = 0; i < nr_tbls; i++) {
6239 struct bnxt_ctx_pg_info *pg_tbl;
6240
6241 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6242 if (!pg_tbl)
6243 return -ENOMEM;
6244 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6245 rmem = &pg_tbl->ring_mem;
6246 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6247 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6248 rmem->depth = 1;
6249 rmem->nr_pages = MAX_CTX_PAGES;
Michael Chan6ef982d2019-01-12 00:13:05 -05006250 if (i == (nr_tbls - 1)) {
6251 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6252
6253 if (rem)
6254 rmem->nr_pages = rem;
6255 }
Michael Chan08fe9d12018-12-20 03:38:50 -05006256 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6257 if (rc)
6258 break;
6259 }
6260 } else {
6261 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6262 if (rmem->nr_pages > 1 || depth)
6263 rmem->depth = 1;
6264 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6265 }
6266 return rc;
6267}
6268
6269static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6270 struct bnxt_ctx_pg_info *ctx_pg)
6271{
6272 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6273
6274 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6275 ctx_pg->ctx_pg_tbl) {
6276 int i, nr_tbls = rmem->nr_pages;
6277
6278 for (i = 0; i < nr_tbls; i++) {
6279 struct bnxt_ctx_pg_info *pg_tbl;
6280 struct bnxt_ring_mem_info *rmem2;
6281
6282 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6283 if (!pg_tbl)
6284 continue;
6285 rmem2 = &pg_tbl->ring_mem;
6286 bnxt_free_ring(bp, rmem2);
6287 ctx_pg->ctx_pg_arr[i] = NULL;
6288 kfree(pg_tbl);
6289 ctx_pg->ctx_pg_tbl[i] = NULL;
6290 }
6291 kfree(ctx_pg->ctx_pg_tbl);
6292 ctx_pg->ctx_pg_tbl = NULL;
6293 }
6294 bnxt_free_ring(bp, rmem);
6295 ctx_pg->nr_pages = 0;
6296}
6297
Michael Chan98f04cf2018-10-14 07:02:43 -04006298static void bnxt_free_ctx_mem(struct bnxt *bp)
6299{
6300 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6301 int i;
6302
6303 if (!ctx)
6304 return;
6305
6306 if (ctx->tqm_mem[0]) {
6307 for (i = 0; i < bp->max_q + 1; i++)
Michael Chan08fe9d12018-12-20 03:38:50 -05006308 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
Michael Chan98f04cf2018-10-14 07:02:43 -04006309 kfree(ctx->tqm_mem[0]);
6310 ctx->tqm_mem[0] = NULL;
6311 }
6312
Michael Chancf6daed2018-12-20 03:38:51 -05006313 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6314 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
Michael Chan08fe9d12018-12-20 03:38:50 -05006315 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6316 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6317 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6318 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6319 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
Michael Chan98f04cf2018-10-14 07:02:43 -04006320 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6321}
6322
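/* Size and allocate all backing store context regions (QP, SRQ, CQ,
 * VNIC, stats and TQM, plus MRAV and TIM when RoCE is supported), then
 * configure them in the firmware.
 */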
6323static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6324{
6325 struct bnxt_ctx_pg_info *ctx_pg;
6326 struct bnxt_ctx_mem_info *ctx;
Michael Chan1b9394e2018-10-14 07:02:44 -04006327 u32 mem_size, ena, entries;
Michael Chancf6daed2018-12-20 03:38:51 -05006328 u32 extra_srqs = 0;
6329 u32 extra_qps = 0;
6330 u8 pg_lvl = 1;
Michael Chan98f04cf2018-10-14 07:02:43 -04006331 int i, rc;
6332
6333 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6334 if (rc) {
6335 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6336 rc);
6337 return rc;
6338 }
6339 ctx = bp->ctx;
6340 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6341 return 0;
6342
Michael Chancf6daed2018-12-20 03:38:51 -05006343 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
6344 pg_lvl = 2;
6345 extra_qps = 65536;
6346 extra_srqs = 8192;
6347 }
6348
Michael Chan98f04cf2018-10-14 07:02:43 -04006349 ctx_pg = &ctx->qp_mem;
Michael Chancf6daed2018-12-20 03:38:51 -05006350 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6351 extra_qps;
Michael Chan98f04cf2018-10-14 07:02:43 -04006352 mem_size = ctx->qp_entry_size * ctx_pg->entries;
Michael Chancf6daed2018-12-20 03:38:51 -05006353 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
Michael Chan98f04cf2018-10-14 07:02:43 -04006354 if (rc)
6355 return rc;
6356
6357 ctx_pg = &ctx->srq_mem;
Michael Chancf6daed2018-12-20 03:38:51 -05006358 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
Michael Chan98f04cf2018-10-14 07:02:43 -04006359 mem_size = ctx->srq_entry_size * ctx_pg->entries;
Michael Chancf6daed2018-12-20 03:38:51 -05006360 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
Michael Chan98f04cf2018-10-14 07:02:43 -04006361 if (rc)
6362 return rc;
6363
6364 ctx_pg = &ctx->cq_mem;
Michael Chancf6daed2018-12-20 03:38:51 -05006365 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
Michael Chan98f04cf2018-10-14 07:02:43 -04006366 mem_size = ctx->cq_entry_size * ctx_pg->entries;
Michael Chancf6daed2018-12-20 03:38:51 -05006367 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
Michael Chan98f04cf2018-10-14 07:02:43 -04006368 if (rc)
6369 return rc;
6370
6371 ctx_pg = &ctx->vnic_mem;
6372 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6373 ctx->vnic_max_ring_table_entries;
6374 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
Michael Chan08fe9d12018-12-20 03:38:50 -05006375 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
Michael Chan98f04cf2018-10-14 07:02:43 -04006376 if (rc)
6377 return rc;
6378
6379 ctx_pg = &ctx->stat_mem;
6380 ctx_pg->entries = ctx->stat_max_entries;
6381 mem_size = ctx->stat_entry_size * ctx_pg->entries;
Michael Chan08fe9d12018-12-20 03:38:50 -05006382 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
Michael Chan98f04cf2018-10-14 07:02:43 -04006383 if (rc)
6384 return rc;
6385
Michael Chancf6daed2018-12-20 03:38:51 -05006386 ena = 0;
6387 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6388 goto skip_rdma;
6389
6390 ctx_pg = &ctx->mrav_mem;
6391 ctx_pg->entries = extra_qps * 4;
6392 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6393 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6394 if (rc)
6395 return rc;
6396 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6397
6398 ctx_pg = &ctx->tim_mem;
6399 ctx_pg->entries = ctx->qp_mem.entries;
6400 mem_size = ctx->tim_entry_size * ctx_pg->entries;
6401 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6402 if (rc)
6403 return rc;
6404 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6405
6406skip_rdma:
6407 entries = ctx->qp_max_l2_entries + extra_qps;
Michael Chan98f04cf2018-10-14 07:02:43 -04006408 entries = roundup(entries, ctx->tqm_entries_multiple);
6409 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6410 ctx->tqm_max_entries_per_ring);
Michael Chancf6daed2018-12-20 03:38:51 -05006411 for (i = 0; i < bp->max_q + 1; i++) {
Michael Chan98f04cf2018-10-14 07:02:43 -04006412 ctx_pg = ctx->tqm_mem[i];
6413 ctx_pg->entries = entries;
6414 mem_size = ctx->tqm_entry_size * entries;
Michael Chan08fe9d12018-12-20 03:38:50 -05006415 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
Michael Chan98f04cf2018-10-14 07:02:43 -04006416 if (rc)
6417 return rc;
Michael Chan1b9394e2018-10-14 07:02:44 -04006418 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
Michael Chan98f04cf2018-10-14 07:02:43 -04006419 }
Michael Chan1b9394e2018-10-14 07:02:44 -04006420 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6421 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6422 if (rc)
6423 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6424 rc);
6425 else
6426 ctx->flags |= BNXT_CTX_FLAG_INITED;
6427
Michael Chan98f04cf2018-10-14 07:02:43 -04006428 return 0;
6429}
6430
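/* Query the min/max resource limits (rings, ring groups, VNICs, stat
 * contexts, etc.) for this function.  With @all false, only
 * max_tx_sch_inputs is refreshed.
 */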
Michael Chandb4723b2018-03-31 13:54:13 -04006431int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006432{
6433 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6434 struct hwrm_func_resource_qcaps_input req = {0};
6435 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6436 int rc;
6437
6438 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6439 req.fid = cpu_to_le16(0xffff);
6440
6441 mutex_lock(&bp->hwrm_cmd_lock);
Jonathan Toppins351cbde2018-12-12 11:58:51 -05006442 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6443 HWRM_CMD_TIMEOUT);
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006444 if (rc) {
6445 rc = -EIO;
6446 goto hwrm_func_resc_qcaps_exit;
6447 }
6448
Michael Chandb4723b2018-03-31 13:54:13 -04006449 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6450 if (!all)
6451 goto hwrm_func_resc_qcaps_exit;
6452
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006453 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6454 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6455 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6456 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6457 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6458 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6459 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6460 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6461 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6462 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6463 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6464 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6465 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6466 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6467 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6468 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6469
Michael Chan9c1fabd2018-10-14 07:02:47 -04006470 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6471 u16 max_msix = le16_to_cpu(resp->max_msix);
6472
Michael Chanf7588cd2018-12-16 18:46:19 -05006473 hw_resc->max_nqs = max_msix;
Michael Chan9c1fabd2018-10-14 07:02:47 -04006474 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6475 }
6476
Michael Chan4673d662018-01-17 03:21:11 -05006477 if (BNXT_PF(bp)) {
6478 struct bnxt_pf_info *pf = &bp->pf;
6479
6480 pf->vf_resv_strategy =
6481 le16_to_cpu(resp->vf_reservation_strategy);
Michael Chanbf827362018-08-05 16:51:50 -04006482 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
Michael Chan4673d662018-01-17 03:21:11 -05006483 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6484 }
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006485hwrm_func_resc_qcaps_exit:
6486 mutex_unlock(&bp->hwrm_cmd_lock);
6487 return rc;
6488}
6489
6490static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04006491{
6492 int rc = 0;
6493 struct hwrm_func_qcaps_input req = {0};
6494 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan6a4f2942018-01-17 03:21:06 -05006495 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6496 u32 flags;
Michael Chanc0c050c2015-10-22 16:01:17 -04006497
6498 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6499 req.fid = cpu_to_le16(0xffff);
6500
6501 mutex_lock(&bp->hwrm_cmd_lock);
6502 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6503 if (rc)
6504 goto hwrm_func_qcaps_exit;
6505
Michael Chan6a4f2942018-01-17 03:21:06 -05006506 flags = le32_to_cpu(resp->flags);
6507 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
Michael Chane4060d32016-12-07 00:26:19 -05006508 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
Michael Chan6a4f2942018-01-17 03:21:06 -05006509 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
Michael Chane4060d32016-12-07 00:26:19 -05006510 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6511
Michael Chan7cc5a202016-09-19 03:58:05 -04006512 bp->tx_push_thresh = 0;
Michael Chan6a4f2942018-01-17 03:21:06 -05006513 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
Michael Chan7cc5a202016-09-19 03:58:05 -04006514 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6515
Michael Chan6a4f2942018-01-17 03:21:06 -05006516 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6517 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6518 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6519 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6520 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6521 if (!hw_resc->max_hw_ring_grps)
6522 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6523 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6524 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6525 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6526
Michael Chanc0c050c2015-10-22 16:01:17 -04006527 if (BNXT_PF(bp)) {
6528 struct bnxt_pf_info *pf = &bp->pf;
6529
6530 pf->fw_fid = le16_to_cpu(resp->fid);
6531 pf->port_id = le16_to_cpu(resp->port_id);
Michael Chan87027db2016-07-01 18:46:28 -04006532 bp->dev->dev_port = pf->port_id;
Michael Chan11f15ed2016-04-05 14:08:55 -04006533 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04006534 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6535 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6536 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6537 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6538 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6539 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6540 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6541 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
Michael Chan6a4f2942018-01-17 03:21:06 -05006542 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
Michael Chanc1ef1462017-04-04 18:14:07 -04006543 bp->flags |= BNXT_FLAG_WOL_CAP;
Michael Chanc0c050c2015-10-22 16:01:17 -04006544 } else {
Michael Chan379a80a2015-10-23 15:06:19 -04006545#ifdef CONFIG_BNXT_SRIOV
Michael Chanc0c050c2015-10-22 16:01:17 -04006546 struct bnxt_vf_info *vf = &bp->vf;
6547
6548 vf->fw_fid = le16_to_cpu(resp->fid);
Michael Chan7cc5a202016-09-19 03:58:05 -04006549 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
Michael Chan379a80a2015-10-23 15:06:19 -04006550#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04006551 }
6552
Michael Chanc0c050c2015-10-22 16:01:17 -04006553hwrm_func_qcaps_exit:
6554 mutex_unlock(&bp->hwrm_cmd_lock);
6555 return rc;
6556}
6557
Michael Chan804fba42018-12-09 07:00:59 -05006558static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6559
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006560static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6561{
6562 int rc;
6563
6564 rc = __bnxt_hwrm_func_qcaps(bp);
6565 if (rc)
6566 return rc;
Michael Chan804fba42018-12-09 07:00:59 -05006567 rc = bnxt_hwrm_queue_qportcfg(bp);
6568 if (rc) {
6569 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6570 return rc;
6571 }
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006572 if (bp->hwrm_spec_code >= 0x10803) {
Michael Chan98f04cf2018-10-14 07:02:43 -04006573 rc = bnxt_alloc_ctx_mem(bp);
6574 if (rc)
6575 return rc;
Michael Chandb4723b2018-03-31 13:54:13 -04006576 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006577 if (!rc)
Michael Chan97381a12018-08-05 16:51:54 -04006578 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006579 }
6580 return 0;
6581}
6582
Michael Chanc0c050c2015-10-22 16:01:17 -04006583static int bnxt_hwrm_func_reset(struct bnxt *bp)
6584{
6585 struct hwrm_func_reset_input req = {0};
6586
6587 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6588 req.enables = 0;
6589
6590 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6591}
6592
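/* Read the port's CoS queue configuration into bp->q_info[] and
 * bp->q_ids[].  Queues with a RoCE CNP profile are left out of the L2 TC
 * mapping when the device supports RoCE.
 */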
6593static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6594{
6595 int rc = 0;
6596 struct hwrm_queue_qportcfg_input req = {0};
6597 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chanaabfc012018-08-05 16:51:58 -04006598 u8 i, j, *qptr;
6599 bool no_rdma;
Michael Chanc0c050c2015-10-22 16:01:17 -04006600
6601 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6602
6603 mutex_lock(&bp->hwrm_cmd_lock);
6604 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6605 if (rc)
6606 goto qportcfg_exit;
6607
6608 if (!resp->max_configurable_queues) {
6609 rc = -EINVAL;
6610 goto qportcfg_exit;
6611 }
6612 bp->max_tc = resp->max_configurable_queues;
Michael Chan87c374d2016-12-02 21:17:16 -05006613 bp->max_lltc = resp->max_configurable_lossless_queues;
Michael Chanc0c050c2015-10-22 16:01:17 -04006614 if (bp->max_tc > BNXT_MAX_QUEUE)
6615 bp->max_tc = BNXT_MAX_QUEUE;
6616
Michael Chanaabfc012018-08-05 16:51:58 -04006617 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6618 qptr = &resp->queue_id0;
6619 for (i = 0, j = 0; i < bp->max_tc; i++) {
Michael Chan98f04cf2018-10-14 07:02:43 -04006620 bp->q_info[j].queue_id = *qptr;
6621 bp->q_ids[i] = *qptr++;
Michael Chanaabfc012018-08-05 16:51:58 -04006622 bp->q_info[j].queue_profile = *qptr++;
6623 bp->tc_to_qidx[j] = j;
6624 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6625 (no_rdma && BNXT_PF(bp)))
6626 j++;
6627 }
Michael Chan98f04cf2018-10-14 07:02:43 -04006628 bp->max_q = bp->max_tc;
Michael Chanaabfc012018-08-05 16:51:58 -04006629 bp->max_tc = max_t(u8, j, 1);
6630
Michael Chan441cabb2016-09-19 03:58:02 -04006631 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6632 bp->max_tc = 1;
6633
Michael Chan87c374d2016-12-02 21:17:16 -05006634 if (bp->max_lltc > bp->max_tc)
6635 bp->max_lltc = bp->max_tc;
6636
Michael Chanc0c050c2015-10-22 16:01:17 -04006637qportcfg_exit:
6638 mutex_unlock(&bp->hwrm_cmd_lock);
6639 return rc;
6640}
6641
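/* Query the firmware and HWRM interface versions, the maximum request
 * length and timeout, and record optional capabilities (short commands,
 * Kong mailbox channel, 64-bit flow handles, trusted VF support).
 */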
6642static int bnxt_hwrm_ver_get(struct bnxt *bp)
6643{
6644 int rc;
6645 struct hwrm_ver_get_input req = {0};
6646 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
Deepak Khungare605db82017-05-29 19:06:04 -04006647 u32 dev_caps_cfg;
Michael Chanc0c050c2015-10-22 16:01:17 -04006648
Michael Chane6ef2692016-03-28 19:46:05 -04006649 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
Michael Chanc0c050c2015-10-22 16:01:17 -04006650 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6651 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6652 req.hwrm_intf_min = HWRM_VERSION_MINOR;
6653 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6654 mutex_lock(&bp->hwrm_cmd_lock);
6655 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6656 if (rc)
6657 goto hwrm_ver_get_exit;
6658
6659 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6660
Michael Chan894aa692018-01-17 03:21:03 -05006661 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6662 resp->hwrm_intf_min_8b << 8 |
6663 resp->hwrm_intf_upd_8b;
6664 if (resp->hwrm_intf_maj_8b < 1) {
Michael Chanc1935542015-12-27 18:19:28 -05006665 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
Michael Chan894aa692018-01-17 03:21:03 -05006666 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6667 resp->hwrm_intf_upd_8b);
Michael Chanc1935542015-12-27 18:19:28 -05006668 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04006669 }
Michael Chan431aa1e2017-10-26 11:51:23 -04006670 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
Michael Chan894aa692018-01-17 03:21:03 -05006671 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6672 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
Michael Chanc0c050c2015-10-22 16:01:17 -04006673
Michael Chanff4fe812016-02-26 04:00:04 -05006674 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
6675 if (!bp->hwrm_cmd_timeout)
6676 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
6677
Michael Chan1dfddc42018-10-14 07:02:39 -04006678 if (resp->hwrm_intf_maj_8b >= 1) {
Michael Chane6ef2692016-03-28 19:46:05 -04006679 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
Michael Chan1dfddc42018-10-14 07:02:39 -04006680 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
6681 }
6682 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
6683 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
Michael Chane6ef2692016-03-28 19:46:05 -04006684
Michael Chan659c8052016-06-13 02:25:33 -04006685 bp->chip_num = le16_to_cpu(resp->chip_num);
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006686 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
6687 !resp->chip_metal)
6688 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
Michael Chan659c8052016-06-13 02:25:33 -04006689
Deepak Khungare605db82017-05-29 19:06:04 -04006690 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
6691 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
6692 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
Michael Chan97381a12018-08-05 16:51:54 -04006693 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
Deepak Khungare605db82017-05-29 19:06:04 -04006694
Venkat Duvvuru760b6d32018-12-20 03:38:48 -05006695 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
6696 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
6697
Venkat Duvvuruabd43a12018-12-20 03:38:52 -05006698 if (dev_caps_cfg &
6699 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
6700 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
6701
Michael Chan2a516442019-02-19 05:31:14 -05006702 if (dev_caps_cfg &
6703 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
6704 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
6705
Michael Chanc0c050c2015-10-22 16:01:17 -04006706hwrm_ver_get_exit:
6707 mutex_unlock(&bp->hwrm_cmd_lock);
6708 return rc;
6709}
6710
Rob Swindell5ac67d82016-09-19 03:58:03 -04006711int bnxt_hwrm_fw_set_time(struct bnxt *bp)
6712{
6713 struct hwrm_fw_set_time_input req = {0};
Arnd Bergmann7dfaa7b2017-11-06 15:04:39 +01006714 struct tm tm;
6715 time64_t now = ktime_get_real_seconds();
Rob Swindell5ac67d82016-09-19 03:58:03 -04006716
Michael Chanca2c39e2018-04-26 17:44:34 -04006717 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
6718 bp->hwrm_spec_code < 0x10400)
Rob Swindell5ac67d82016-09-19 03:58:03 -04006719 return -EOPNOTSUPP;
6720
Arnd Bergmann7dfaa7b2017-11-06 15:04:39 +01006721 time64_to_tm(now, 0, &tm);
Rob Swindell5ac67d82016-09-19 03:58:03 -04006722 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
6723 req.year = cpu_to_le16(1900 + tm.tm_year);
6724 req.month = 1 + tm.tm_mon;
6725 req.day = tm.tm_mday;
6726 req.hour = tm.tm_hour;
6727 req.minute = tm.tm_min;
6728 req.second = tm.tm_sec;
6729 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6730}
6731
Michael Chan3bdf56c2016-03-07 15:38:45 -05006732static int bnxt_hwrm_port_qstats(struct bnxt *bp)
6733{
6734 int rc;
6735 struct bnxt_pf_info *pf = &bp->pf;
6736 struct hwrm_port_qstats_input req = {0};
6737
6738 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
6739 return 0;
6740
6741 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
6742 req.port_id = cpu_to_le16(pf->port_id);
6743 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
6744 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
6745 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6746 return rc;
6747}
6748
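/* DMA the extended port statistics into the host buffers and, if the
 * firmware reports per-priority counters, also fetch the priority to
 * CoS queue mapping needed to interpret them.
 */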
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006749static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6750{
Michael Chan36e53342018-10-14 07:02:38 -04006751 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chane37fed72018-12-16 18:46:26 -05006752 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006753 struct hwrm_port_qstats_ext_input req = {0};
6754 struct bnxt_pf_info *pf = &bp->pf;
Michael Chanad361ad2019-04-25 22:31:53 -04006755 u32 tx_stat_size;
Michael Chan36e53342018-10-14 07:02:38 -04006756 int rc;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006757
6758 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
6759 return 0;
6760
6761 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
6762 req.port_id = cpu_to_le16(pf->port_id);
6763 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6764 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
Michael Chanad361ad2019-04-25 22:31:53 -04006765 tx_stat_size = bp->hw_tx_port_stats_ext ?
6766 sizeof(*bp->hw_tx_port_stats_ext) : 0;
6767 req.tx_stat_size = cpu_to_le16(tx_stat_size);
Michael Chan36e53342018-10-14 07:02:38 -04006768 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6769 mutex_lock(&bp->hwrm_cmd_lock);
6770 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6771 if (!rc) {
6772 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
Michael Chanad361ad2019-04-25 22:31:53 -04006773 bp->fw_tx_stats_ext_size = tx_stat_size ?
6774 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
Michael Chan36e53342018-10-14 07:02:38 -04006775 } else {
6776 bp->fw_rx_stats_ext_size = 0;
6777 bp->fw_tx_stats_ext_size = 0;
6778 }
Michael Chane37fed72018-12-16 18:46:26 -05006779 if (bp->fw_tx_stats_ext_size <=
6780 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
6781 mutex_unlock(&bp->hwrm_cmd_lock);
6782 bp->pri2cos_valid = 0;
6783 return rc;
6784 }
6785
6786 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
6787 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
6788
6789 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
6790 if (!rc) {
6791 struct hwrm_queue_pri2cos_qcfg_output *resp2;
6792 u8 *pri2cos;
6793 int i, j;
6794
6795 resp2 = bp->hwrm_cmd_resp_addr;
6796 pri2cos = &resp2->pri0_cos_queue_id;
6797 for (i = 0; i < 8; i++) {
6798 u8 queue_id = pri2cos[i];
6799
6800 for (j = 0; j < bp->max_q; j++) {
6801 if (bp->q_ids[j] == queue_id)
6802 bp->pri2cos[i] = j;
6803 }
6804 }
6805 bp->pri2cos_valid = 1;
6806 }
Michael Chan36e53342018-10-14 07:02:38 -04006807 mutex_unlock(&bp->hwrm_cmd_lock);
6808 return rc;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006809}
6810
Michael Chanc0c050c2015-10-22 16:01:17 -04006811static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
6812{
6813 if (bp->vxlan_port_cnt) {
6814 bnxt_hwrm_tunnel_dst_port_free(
6815 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6816 }
6817 bp->vxlan_port_cnt = 0;
6818 if (bp->nge_port_cnt) {
6819 bnxt_hwrm_tunnel_dst_port_free(
6820 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6821 }
6822 bp->nge_port_cnt = 0;
6823}
6824
6825static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
6826{
6827 int rc, i;
6828 u32 tpa_flags = 0;
6829
6830 if (set_tpa)
6831 tpa_flags = bp->flags & BNXT_FLAG_TPA;
6832 for (i = 0; i < bp->nr_vnics; i++) {
6833 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
6834 if (rc) {
6835 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
Sankar Patchineelam23e12c82017-03-28 19:47:30 -04006836 i, rc);
Michael Chanc0c050c2015-10-22 16:01:17 -04006837 return rc;
6838 }
6839 }
6840 return 0;
6841}
6842
6843static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
6844{
6845 int i;
6846
6847 for (i = 0; i < bp->nr_vnics; i++)
6848 bnxt_hwrm_vnic_set_rss(bp, i, false);
6849}
6850
6851static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
6852 bool irq_re_init)
6853{
6854 if (bp->vnic_info) {
6855 bnxt_hwrm_clear_vnic_filter(bp);
6856 /* clear all RSS setting before free vnic ctx */
6857 bnxt_hwrm_clear_vnic_rss(bp);
6858 bnxt_hwrm_vnic_ctx_free(bp);
6859 /* before free the vnic, undo the vnic tpa settings */
6860 if (bp->flags & BNXT_FLAG_TPA)
6861 bnxt_set_tpa(bp, false);
6862 bnxt_hwrm_vnic_free(bp);
6863 }
6864 bnxt_hwrm_ring_free(bp, close_path);
6865 bnxt_hwrm_ring_grp_free(bp);
6866 if (irq_re_init) {
6867 bnxt_hwrm_stat_ctx_free(bp);
6868 bnxt_hwrm_free_tunnel_ports(bp);
6869 }
6870}
6871
Michael Chan39d8ba22017-07-24 12:34:22 -04006872static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
6873{
6874 struct hwrm_func_cfg_input req = {0};
6875 int rc;
6876
6877 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6878 req.fid = cpu_to_le16(0xffff);
6879 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
6880 if (br_mode == BRIDGE_MODE_VEB)
6881 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
6882 else if (br_mode == BRIDGE_MODE_VEPA)
6883 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
6884 else
6885 return -EINVAL;
6886 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6887 if (rc)
6888 rc = -EIO;
6889 return rc;
6890}
6891
Michael Chanc3480a62018-01-17 03:21:15 -05006892static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
6893{
6894 struct hwrm_func_cfg_input req = {0};
6895 int rc;
6896
6897 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
6898 return 0;
6899
6900 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6901 req.fid = cpu_to_le16(0xffff);
6902 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
Michael Chand4f52de02018-03-31 13:54:06 -04006903 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
Michael Chanc3480a62018-01-17 03:21:15 -05006904 if (size == 128)
Michael Chand4f52de02018-03-31 13:54:06 -04006905 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
Michael Chanc3480a62018-01-17 03:21:15 -05006906
6907 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6908 if (rc)
6909 rc = -EIO;
6910 return rc;
6911}
6912
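/* Set up a VNIC on legacy (pre-P5) chips: allocate the RSS context(s),
 * configure the default ring group, and enable RSS and header-data
 * split where applicable.
 */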
Michael Chan7b3af4f2018-10-14 07:02:54 -04006913static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
Michael Chanc0c050c2015-10-22 16:01:17 -04006914{
Michael Chanae10ae72016-12-29 12:13:38 -05006915 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
Michael Chanc0c050c2015-10-22 16:01:17 -04006916 int rc;
6917
Michael Chanae10ae72016-12-29 12:13:38 -05006918 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
6919 goto skip_rss_ctx;
6920
Michael Chanc0c050c2015-10-22 16:01:17 -04006921 /* allocate context for vnic */
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04006922 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
Michael Chanc0c050c2015-10-22 16:01:17 -04006923 if (rc) {
6924 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6925 vnic_id, rc);
6926 goto vnic_setup_err;
6927 }
6928 bp->rsscos_nr_ctxs++;
6929
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04006930 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6931 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
6932 if (rc) {
6933 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
6934 vnic_id, rc);
6935 goto vnic_setup_err;
6936 }
6937 bp->rsscos_nr_ctxs++;
6938 }
6939
Michael Chanae10ae72016-12-29 12:13:38 -05006940skip_rss_ctx:
Michael Chanc0c050c2015-10-22 16:01:17 -04006941 /* configure default vnic, ring grp */
6942 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6943 if (rc) {
6944 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6945 vnic_id, rc);
6946 goto vnic_setup_err;
6947 }
6948
6949 /* Enable RSS hashing on vnic */
6950 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
6951 if (rc) {
6952 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
6953 vnic_id, rc);
6954 goto vnic_setup_err;
6955 }
6956
6957 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6958 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6959 if (rc) {
6960 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6961 vnic_id, rc);
6962 }
6963 }
6964
6965vnic_setup_err:
6966 return rc;
6967}
6968
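/* P5 chips use one RSS context per group of up to 64 RX rings, so allocate
 * DIV_ROUND_UP(rx_nr_rings, 64) contexts before programming RSS and
 * configuring the vnic itself.
 */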
Michael Chan7b3af4f2018-10-14 07:02:54 -04006969static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
6970{
6971 int rc, i, nr_ctxs;
6972
6973 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
6974 for (i = 0; i < nr_ctxs; i++) {
6975 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
6976 if (rc) {
6977 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
6978 vnic_id, i, rc);
6979 break;
6980 }
6981 bp->rsscos_nr_ctxs++;
6982 }
6983 if (i < nr_ctxs)
6984 return -ENOMEM;
6985
6986 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
6987 if (rc) {
6988 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
6989 vnic_id, rc);
6990 return rc;
6991 }
6992 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6993 if (rc) {
6994 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6995 vnic_id, rc);
6996 return rc;
6997 }
6998 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6999 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7000 if (rc) {
7001 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7002 vnic_id, rc);
7003 }
7004 }
7005 return rc;
7006}
7007
7008static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7009{
7010 if (bp->flags & BNXT_FLAG_CHIP_P5)
7011 return __bnxt_setup_vnic_p5(bp, vnic_id);
7012 else
7013 return __bnxt_setup_vnic(bp, vnic_id);
7014}
7015
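/* For accelerated RFS, allocate one additional vnic per RX ring (vnic 0 is
 * the default vnic).  Compiled out when CONFIG_RFS_ACCEL is not set.
 */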
Michael Chanc0c050c2015-10-22 16:01:17 -04007016static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7017{
7018#ifdef CONFIG_RFS_ACCEL
7019 int i, rc = 0;
7020
7021 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanae10ae72016-12-29 12:13:38 -05007022 struct bnxt_vnic_info *vnic;
Michael Chanc0c050c2015-10-22 16:01:17 -04007023 u16 vnic_id = i + 1;
7024 u16 ring_id = i;
7025
7026 if (vnic_id >= bp->nr_vnics)
7027 break;
7028
Michael Chanae10ae72016-12-29 12:13:38 -05007029 vnic = &bp->vnic_info[vnic_id];
7030 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7031 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7032 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
Michael Chanb81a90d2016-01-02 23:45:01 -05007033 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
Michael Chanc0c050c2015-10-22 16:01:17 -04007034 if (rc) {
7035 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7036 vnic_id, rc);
7037 break;
7038 }
7039 rc = bnxt_setup_vnic(bp, vnic_id);
7040 if (rc)
7041 break;
7042 }
7043 return rc;
7044#else
7045 return 0;
7046#endif
7047}
7048
Michael Chan17c71ac2016-07-01 18:46:27 -04007049/* Allow PF and VF with default VLAN to be in promiscuous mode */
7050static bool bnxt_promisc_ok(struct bnxt *bp)
7051{
7052#ifdef CONFIG_BNXT_SRIOV
7053 if (BNXT_VF(bp) && !bp->vf.vlan)
7054 return false;
7055#endif
7056 return true;
7057}
7058
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04007059static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7060{
7061	int rc;
7062
7063 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7064 if (rc) {
7065 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7066 rc);
7067 return rc;
7068 }
7069
7070 rc = bnxt_hwrm_vnic_cfg(bp, 1);
7071 if (rc) {
7072		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
7073 rc);
7074 return rc;
7075 }
7076 return rc;
7077}
7078
Michael Chanb664f002015-12-02 01:54:08 -05007079static int bnxt_cfg_rx_mode(struct bnxt *);
Michael Chan7d2837d2016-05-04 16:56:44 -04007080static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
Michael Chanb664f002015-12-02 01:54:08 -05007081
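/* Called from bnxt_init_nic() during open: allocate stat contexts, rings and
 * ring groups, set up the default vnic (vnic 0) plus any RFS vnics, restore
 * TPA, program the unicast MAC filter and RX mask, and apply interrupt
 * coalescing.  On failure, everything allocated so far is released through
 * bnxt_hwrm_resource_free().
 */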
Michael Chanc0c050c2015-10-22 16:01:17 -04007082static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7083{
Michael Chan7d2837d2016-05-04 16:56:44 -04007084 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
Michael Chanc0c050c2015-10-22 16:01:17 -04007085 int rc = 0;
Prashant Sreedharan76595192016-07-18 07:15:22 -04007086 unsigned int rx_nr_rings = bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04007087
7088 if (irq_re_init) {
7089 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7090 if (rc) {
7091 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7092 rc);
7093 goto err_out;
7094 }
7095 }
7096
7097 rc = bnxt_hwrm_ring_alloc(bp);
7098 if (rc) {
7099 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7100 goto err_out;
7101 }
7102
7103 rc = bnxt_hwrm_ring_grp_alloc(bp);
7104 if (rc) {
7105 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7106 goto err_out;
7107 }
7108
Prashant Sreedharan76595192016-07-18 07:15:22 -04007109 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7110 rx_nr_rings--;
7111
Michael Chanc0c050c2015-10-22 16:01:17 -04007112 /* default vnic 0 */
Prashant Sreedharan76595192016-07-18 07:15:22 -04007113 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04007114 if (rc) {
7115 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7116 goto err_out;
7117 }
7118
7119 rc = bnxt_setup_vnic(bp, 0);
7120 if (rc)
7121 goto err_out;
7122
7123 if (bp->flags & BNXT_FLAG_RFS) {
7124 rc = bnxt_alloc_rfs_vnics(bp);
7125 if (rc)
7126 goto err_out;
7127 }
7128
7129 if (bp->flags & BNXT_FLAG_TPA) {
7130 rc = bnxt_set_tpa(bp, true);
7131 if (rc)
7132 goto err_out;
7133 }
7134
7135 if (BNXT_VF(bp))
7136 bnxt_update_vf_mac(bp);
7137
7138 /* Filter for default vnic 0 */
7139 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7140 if (rc) {
7141 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7142 goto err_out;
7143 }
Michael Chan7d2837d2016-05-04 16:56:44 -04007144 vnic->uc_filter_count = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04007145
Michael Chan30e33842018-07-09 02:24:50 -04007146 vnic->rx_mask = 0;
7147 if (bp->dev->flags & IFF_BROADCAST)
7148 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
Michael Chanc0c050c2015-10-22 16:01:17 -04007149
Michael Chan17c71ac2016-07-01 18:46:27 -04007150 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chan7d2837d2016-05-04 16:56:44 -04007151 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7152
7153 if (bp->dev->flags & IFF_ALLMULTI) {
7154 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7155 vnic->mc_list_count = 0;
7156 } else {
7157 u32 mask = 0;
7158
7159 bnxt_mc_list_updated(bp, &mask);
7160 vnic->rx_mask |= mask;
7161 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007162
Michael Chanb664f002015-12-02 01:54:08 -05007163 rc = bnxt_cfg_rx_mode(bp);
7164 if (rc)
Michael Chanc0c050c2015-10-22 16:01:17 -04007165 goto err_out;
Michael Chanc0c050c2015-10-22 16:01:17 -04007166
7167 rc = bnxt_hwrm_set_coal(bp);
7168 if (rc)
7169 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04007170 rc);
7171
7172 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7173 rc = bnxt_setup_nitroa0_vnic(bp);
7174 if (rc)
7175 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7176 rc);
7177 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007178
Michael Chancf6645f2016-06-13 02:25:28 -04007179 if (BNXT_VF(bp)) {
7180 bnxt_hwrm_func_qcfg(bp);
7181 netdev_update_features(bp->dev);
7182 }
7183
Michael Chanc0c050c2015-10-22 16:01:17 -04007184 return 0;
7185
7186err_out:
7187 bnxt_hwrm_resource_free(bp, 0, true);
7188
7189 return rc;
7190}
7191
7192static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7193{
7194 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7195 return 0;
7196}
7197
7198static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7199{
Sankar Patchineelam22479252017-03-28 19:47:29 -04007200 bnxt_init_cp_rings(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007201 bnxt_init_rx_rings(bp);
7202 bnxt_init_tx_rings(bp);
7203 bnxt_init_ring_grps(bp, irq_re_init);
7204 bnxt_init_vnics(bp);
7205
7206 return bnxt_init_chip(bp, irq_re_init);
7207}
7208
Michael Chanc0c050c2015-10-22 16:01:17 -04007209static int bnxt_set_real_num_queues(struct bnxt *bp)
7210{
7211 int rc;
7212 struct net_device *dev = bp->dev;
7213
Michael Chan5f449242017-02-06 16:55:40 -05007214 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7215 bp->tx_nr_rings_xdp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007216 if (rc)
7217 return rc;
7218
7219 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7220 if (rc)
7221 return rc;
7222
7223#ifdef CONFIG_RFS_ACCEL
Michael Chan45019a12015-12-27 18:19:22 -05007224 if (bp->flags & BNXT_FLAG_RFS)
Michael Chanc0c050c2015-10-22 16:01:17 -04007225 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04007226#endif
7227
7228 return rc;
7229}
7230
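/* Trim the requested RX/TX ring counts to fit within "max" completion rings.
 * With shared completion rings each count is simply capped at max; otherwise
 * the larger of the two counts is reduced until rx + tx <= max.
 */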
Michael Chan6e6c5a52016-01-02 23:45:02 -05007231static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7232 bool shared)
7233{
7234 int _rx = *rx, _tx = *tx;
7235
7236 if (shared) {
7237 *rx = min_t(int, _rx, max);
7238 *tx = min_t(int, _tx, max);
7239 } else {
7240 if (max < 2)
7241 return -ENOMEM;
7242
7243 while (_rx + _tx > max) {
7244 if (_rx > _tx && _rx > 1)
7245 _rx--;
7246 else if (_tx > 1)
7247 _tx--;
7248 }
7249 *rx = _rx;
7250 *tx = _tx;
7251 }
7252 return 0;
7253}
7254
Michael Chan78095922016-12-07 00:26:16 -05007255static void bnxt_setup_msix(struct bnxt *bp)
7256{
7257 const int len = sizeof(bp->irq_tbl[0].name);
7258 struct net_device *dev = bp->dev;
7259 int tcs, i;
7260
7261 tcs = netdev_get_num_tc(dev);
7262 if (tcs > 1) {
Michael Chand1e79252017-02-06 16:55:38 -05007263 int i, off, count;
Michael Chan78095922016-12-07 00:26:16 -05007264
Michael Chand1e79252017-02-06 16:55:38 -05007265 for (i = 0; i < tcs; i++) {
7266 count = bp->tx_nr_rings_per_tc;
7267 off = i * count;
7268 netdev_set_tc_queue(dev, i, count, off);
Michael Chan78095922016-12-07 00:26:16 -05007269 }
7270 }
7271
7272 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chane5811b82018-03-31 13:54:18 -04007273 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
Michael Chan78095922016-12-07 00:26:16 -05007274 char *attr;
7275
7276 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7277 attr = "TxRx";
7278 else if (i < bp->rx_nr_rings)
7279 attr = "rx";
7280 else
7281 attr = "tx";
7282
Michael Chane5811b82018-03-31 13:54:18 -04007283 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7284 attr, i);
7285 bp->irq_tbl[map_idx].handler = bnxt_msix;
Michael Chan78095922016-12-07 00:26:16 -05007286 }
7287}
7288
7289static void bnxt_setup_inta(struct bnxt *bp)
7290{
7291 const int len = sizeof(bp->irq_tbl[0].name);
7292
7293 if (netdev_get_num_tc(bp->dev))
7294 netdev_reset_tc(bp->dev);
7295
7296 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7297 0);
7298 bp->irq_tbl[0].handler = bnxt_inta;
7299}
7300
7301static int bnxt_setup_int_mode(struct bnxt *bp)
7302{
7303 int rc;
7304
7305 if (bp->flags & BNXT_FLAG_USING_MSIX)
7306 bnxt_setup_msix(bp);
7307 else
7308 bnxt_setup_inta(bp);
7309
7310 rc = bnxt_set_real_num_queues(bp);
7311 return rc;
7312}
7313
Michael Chanb7429952017-01-13 01:32:00 -05007314#ifdef CONFIG_RFS_ACCEL
Michael Chan8079e8f2016-12-29 12:13:37 -05007315static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7316{
Michael Chan6a4f2942018-01-17 03:21:06 -05007317 return bp->hw_resc.max_rsscos_ctxs;
Michael Chan8079e8f2016-12-29 12:13:37 -05007318}
7319
7320static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7321{
Michael Chan6a4f2942018-01-17 03:21:06 -05007322 return bp->hw_resc.max_vnics;
Michael Chan8079e8f2016-12-29 12:13:37 -05007323}
Michael Chanb7429952017-01-13 01:32:00 -05007324#endif
Michael Chan8079e8f2016-12-29 12:13:37 -05007325
Michael Chane4060d32016-12-07 00:26:19 -05007326unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7327{
Michael Chan6a4f2942018-01-17 03:21:06 -05007328 return bp->hw_resc.max_stat_ctxs;
Michael Chane4060d32016-12-07 00:26:19 -05007329}
7330
7331unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7332{
Michael Chan6a4f2942018-01-17 03:21:06 -05007333 return bp->hw_resc.max_cp_rings;
Michael Chane4060d32016-12-07 00:26:19 -05007334}
7335
Michael Chane916b082018-12-16 18:46:20 -05007336static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
Michael Chana588e452016-12-07 00:26:21 -05007337{
Michael Chanc0b8cda2018-12-09 07:01:01 -05007338 unsigned int cp = bp->hw_resc.max_cp_rings;
7339
7340 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7341 cp -= bnxt_get_ulp_msix_num(bp);
7342
7343 return cp;
Michael Chana588e452016-12-07 00:26:21 -05007344}
7345
Michael Chanad95c272018-09-03 04:23:18 -04007346static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
Michael Chan78095922016-12-07 00:26:16 -05007347{
Michael Chan6a4f2942018-01-17 03:21:06 -05007348 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7349
Michael Chanf7588cd2018-12-16 18:46:19 -05007350 if (bp->flags & BNXT_FLAG_CHIP_P5)
7351 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7352
Michael Chan6a4f2942018-01-17 03:21:06 -05007353 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
Michael Chan78095922016-12-07 00:26:16 -05007354}
7355
Michael Chan30f52942018-07-09 02:24:51 -04007356static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
Michael Chan33c26572016-12-07 00:26:15 -05007357{
Michael Chan6a4f2942018-01-17 03:21:06 -05007358 bp->hw_resc.max_irqs = max_irqs;
Michael Chan33c26572016-12-07 00:26:15 -05007359}
7360
Michael Chane916b082018-12-16 18:46:20 -05007361unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7362{
7363 unsigned int cp;
7364
7365 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7366 if (bp->flags & BNXT_FLAG_CHIP_P5)
7367 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7368 else
7369 return cp - bp->cp_nr_rings;
7370}
7371
Vasundhara Volamc027c6b2018-12-16 18:46:21 -05007372unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7373{
7374 unsigned int stat;
7375
7376 stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
7377 stat -= bp->cp_nr_rings;
7378 return stat;
7379}
7380
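/* Compute how many additional MSI-X vectors are still available beyond the
 * vectors already used by the L2 completion rings, capping the request at
 * the function's interrupt limit when the new resource manager is active.
 */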
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007381int bnxt_get_avail_msix(struct bnxt *bp, int num)
7382{
7383 int max_cp = bnxt_get_max_func_cp_rings(bp);
7384 int max_irq = bnxt_get_max_func_irqs(bp);
7385 int total_req = bp->cp_nr_rings + num;
7386 int max_idx, avail_msix;
7387
Michael Chan75720e62018-12-09 07:01:00 -05007388 max_idx = bp->total_irqs;
7389 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7390 max_idx = min_t(int, bp->total_irqs, max_cp);
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007391 avail_msix = max_idx - bp->cp_nr_rings;
Michael Chanf1ca94d2018-08-05 16:51:53 -04007392 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007393 return avail_msix;
7394
7395 if (max_irq < total_req) {
7396 num = max_irq - bp->cp_nr_rings;
7397 if (num <= 0)
7398 return 0;
7399 }
7400 return num;
7401}
7402
Michael Chan08654eb2018-03-31 13:54:17 -04007403static int bnxt_get_num_msix(struct bnxt *bp)
7404{
Michael Chanf1ca94d2018-08-05 16:51:53 -04007405 if (!BNXT_NEW_RM(bp))
Michael Chan08654eb2018-03-31 13:54:17 -04007406 return bnxt_get_max_func_irqs(bp);
7407
Michael Chanc0b8cda2018-12-09 07:01:01 -05007408 return bnxt_nq_rings_in_use(bp);
Michael Chan08654eb2018-03-31 13:54:17 -04007409}
7410
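/* Enable MSI-X: request up to bnxt_get_num_msix() vectors, trim the RX/TX
 * ring counts to whatever the PCI core actually grants (minus any vectors
 * reserved for the ULP driver), and build the IRQ table.
 */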
Michael Chan78095922016-12-07 00:26:16 -05007411static int bnxt_init_msix(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007412{
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007413 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
Michael Chan78095922016-12-07 00:26:16 -05007414 struct msix_entry *msix_ent;
Michael Chanc0c050c2015-10-22 16:01:17 -04007415
Michael Chan08654eb2018-03-31 13:54:17 -04007416 total_vecs = bnxt_get_num_msix(bp);
7417 max = bnxt_get_max_func_irqs(bp);
7418 if (total_vecs > max)
7419 total_vecs = max;
7420
Michael Chan2773dfb2018-04-26 17:44:42 -04007421 if (!total_vecs)
7422 return 0;
7423
Michael Chanc0c050c2015-10-22 16:01:17 -04007424 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7425 if (!msix_ent)
7426 return -ENOMEM;
7427
7428 for (i = 0; i < total_vecs; i++) {
7429 msix_ent[i].entry = i;
7430 msix_ent[i].vector = 0;
7431 }
7432
Michael Chan01657bc2016-01-02 23:45:03 -05007433 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7434 min = 2;
7435
7436 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007437 ulp_msix = bnxt_get_ulp_msix_num(bp);
7438 if (total_vecs < 0 || total_vecs < ulp_msix) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007439 rc = -ENODEV;
7440 goto msix_setup_exit;
7441 }
7442
7443 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7444 if (bp->irq_tbl) {
Michael Chan78095922016-12-07 00:26:16 -05007445 for (i = 0; i < total_vecs; i++)
7446 bp->irq_tbl[i].vector = msix_ent[i].vector;
Michael Chanc0c050c2015-10-22 16:01:17 -04007447
Michael Chan78095922016-12-07 00:26:16 -05007448 bp->total_irqs = total_vecs;
Michael Chanc0c050c2015-10-22 16:01:17 -04007449 /* Trim rings based upon num of vectors allocated */
Michael Chan6e6c5a52016-01-02 23:45:02 -05007450 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007451 total_vecs - ulp_msix, min == 1);
Michael Chan6e6c5a52016-01-02 23:45:02 -05007452 if (rc)
7453 goto msix_setup_exit;
7454
Michael Chan78095922016-12-07 00:26:16 -05007455 bp->cp_nr_rings = (min == 1) ?
7456 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7457 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04007458
Michael Chanc0c050c2015-10-22 16:01:17 -04007459 } else {
7460 rc = -ENOMEM;
7461 goto msix_setup_exit;
7462 }
7463 bp->flags |= BNXT_FLAG_USING_MSIX;
7464 kfree(msix_ent);
7465 return 0;
7466
7467msix_setup_exit:
Michael Chan78095922016-12-07 00:26:16 -05007468 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7469 kfree(bp->irq_tbl);
7470 bp->irq_tbl = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04007471 pci_disable_msix(bp->pdev);
7472 kfree(msix_ent);
7473 return rc;
7474}
7475
Michael Chan78095922016-12-07 00:26:16 -05007476static int bnxt_init_inta(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007477{
Michael Chanc0c050c2015-10-22 16:01:17 -04007478 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
Michael Chan78095922016-12-07 00:26:16 -05007479 if (!bp->irq_tbl)
7480 return -ENOMEM;
7481
7482 bp->total_irqs = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04007483 bp->rx_nr_rings = 1;
7484 bp->tx_nr_rings = 1;
7485 bp->cp_nr_rings = 1;
Michael Chan01657bc2016-01-02 23:45:03 -05007486 bp->flags |= BNXT_FLAG_SHARED_RINGS;
Michael Chanc0c050c2015-10-22 16:01:17 -04007487 bp->irq_tbl[0].vector = bp->pdev->irq;
Michael Chan78095922016-12-07 00:26:16 -05007488 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04007489}
7490
Michael Chan78095922016-12-07 00:26:16 -05007491static int bnxt_init_int_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007492{
7493 int rc = 0;
7494
7495 if (bp->flags & BNXT_FLAG_MSIX_CAP)
Michael Chan78095922016-12-07 00:26:16 -05007496 rc = bnxt_init_msix(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007497
Michael Chan1fa72e22016-04-25 02:30:49 -04007498 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007499 /* fallback to INTA */
Michael Chan78095922016-12-07 00:26:16 -05007500 rc = bnxt_init_inta(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007501 }
7502 return rc;
7503}
7504
Michael Chan78095922016-12-07 00:26:16 -05007505static void bnxt_clear_int_mode(struct bnxt *bp)
7506{
7507 if (bp->flags & BNXT_FLAG_USING_MSIX)
7508 pci_disable_msix(bp->pdev);
7509
7510 kfree(bp->irq_tbl);
7511 bp->irq_tbl = NULL;
7512 bp->flags &= ~BNXT_FLAG_USING_MSIX;
7513}
7514
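/* Reserve rings with firmware if the current reservations no longer match
 * what the driver needs.  If the MSI-X count changes under the new resource
 * manager, interrupts are torn down and re-initialized around the
 * reservation, with the ULP driver quiesced in between.
 */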
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007515int bnxt_reserve_rings(struct bnxt *bp)
Michael Chan674f50a2018-01-17 03:21:09 -05007516{
Michael Chan674f50a2018-01-17 03:21:09 -05007517 int tcs = netdev_get_num_tc(bp->dev);
Michael Chan36d65be2018-12-16 18:46:24 -05007518 bool reinit_irq = false;
Michael Chan674f50a2018-01-17 03:21:09 -05007519 int rc;
7520
7521 if (!bnxt_need_reserve_rings(bp))
7522 return 0;
7523
Michael Chanf1ca94d2018-08-05 16:51:53 -04007524 if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
Michael Chanec86f142018-03-31 13:54:21 -04007525 bnxt_ulp_irq_stop(bp);
Michael Chan674f50a2018-01-17 03:21:09 -05007526 bnxt_clear_int_mode(bp);
Michael Chan36d65be2018-12-16 18:46:24 -05007527 reinit_irq = true;
7528 }
7529 rc = __bnxt_reserve_rings(bp);
7530 if (reinit_irq) {
7531 if (!rc)
7532 rc = bnxt_init_int_mode(bp);
Michael Chanec86f142018-03-31 13:54:21 -04007533 bnxt_ulp_irq_restart(bp, rc);
Michael Chan36d65be2018-12-16 18:46:24 -05007534 }
7535 if (rc) {
7536 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7537 return rc;
Michael Chan674f50a2018-01-17 03:21:09 -05007538 }
7539 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7540 netdev_err(bp->dev, "tx ring reservation failure\n");
7541 netdev_reset_tc(bp->dev);
7542 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7543 return -ENOMEM;
7544 }
Michael Chan674f50a2018-01-17 03:21:09 -05007545 return 0;
7546}
7547
Michael Chanc0c050c2015-10-22 16:01:17 -04007548static void bnxt_free_irq(struct bnxt *bp)
7549{
7550 struct bnxt_irq *irq;
7551 int i;
7552
7553#ifdef CONFIG_RFS_ACCEL
7554 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7555 bp->dev->rx_cpu_rmap = NULL;
7556#endif
Michael Chancb985262018-04-11 11:50:18 -04007557 if (!bp->irq_tbl || !bp->bnapi)
Michael Chanc0c050c2015-10-22 16:01:17 -04007558 return;
7559
7560 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chane5811b82018-03-31 13:54:18 -04007561 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7562
7563 irq = &bp->irq_tbl[map_idx];
Vasundhara Volam56f0fd82017-08-28 13:40:27 -04007564 if (irq->requested) {
7565 if (irq->have_cpumask) {
7566 irq_set_affinity_hint(irq->vector, NULL);
7567 free_cpumask_var(irq->cpu_mask);
7568 irq->have_cpumask = 0;
7569 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007570 free_irq(irq->vector, bp->bnapi[i]);
Vasundhara Volam56f0fd82017-08-28 13:40:27 -04007571 }
7572
Michael Chanc0c050c2015-10-22 16:01:17 -04007573 irq->requested = 0;
7574 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007575}
7576
7577static int bnxt_request_irq(struct bnxt *bp)
7578{
Michael Chanb81a90d2016-01-02 23:45:01 -05007579 int i, j, rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04007580 unsigned long flags = 0;
7581#ifdef CONFIG_RFS_ACCEL
Michael Chane5811b82018-03-31 13:54:18 -04007582 struct cpu_rmap *rmap;
Michael Chanc0c050c2015-10-22 16:01:17 -04007583#endif
7584
Michael Chane5811b82018-03-31 13:54:18 -04007585 rc = bnxt_setup_int_mode(bp);
7586 if (rc) {
7587 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7588 rc);
7589 return rc;
7590 }
7591#ifdef CONFIG_RFS_ACCEL
7592 rmap = bp->dev->rx_cpu_rmap;
7593#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04007594 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7595 flags = IRQF_SHARED;
7596
Michael Chanb81a90d2016-01-02 23:45:01 -05007597 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
Michael Chane5811b82018-03-31 13:54:18 -04007598 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7599 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7600
Michael Chanc0c050c2015-10-22 16:01:17 -04007601#ifdef CONFIG_RFS_ACCEL
Michael Chanb81a90d2016-01-02 23:45:01 -05007602 if (rmap && bp->bnapi[i]->rx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007603 rc = irq_cpu_rmap_add(rmap, irq->vector);
7604 if (rc)
7605 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
Michael Chanb81a90d2016-01-02 23:45:01 -05007606 j);
7607 j++;
Michael Chanc0c050c2015-10-22 16:01:17 -04007608 }
7609#endif
7610 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7611 bp->bnapi[i]);
7612 if (rc)
7613 break;
7614
7615 irq->requested = 1;
Vasundhara Volam56f0fd82017-08-28 13:40:27 -04007616
7617 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7618 int numa_node = dev_to_node(&bp->pdev->dev);
7619
7620 irq->have_cpumask = 1;
7621 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7622 irq->cpu_mask);
7623 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7624 if (rc) {
7625 netdev_warn(bp->dev,
7626 "Set affinity failed, IRQ = %d\n",
7627 irq->vector);
7628 break;
7629 }
7630 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007631 }
7632 return rc;
7633}
7634
7635static void bnxt_del_napi(struct bnxt *bp)
7636{
7637 int i;
7638
7639 if (!bp->bnapi)
7640 return;
7641
7642 for (i = 0; i < bp->cp_nr_rings; i++) {
7643 struct bnxt_napi *bnapi = bp->bnapi[i];
7644
7645 napi_hash_del(&bnapi->napi);
7646 netif_napi_del(&bnapi->napi);
7647 }
Eric Dumazete5f6f562016-11-16 06:31:52 -08007648	/* We called napi_hash_del() before netif_napi_del(), so we need
7649 * to respect an RCU grace period before freeing napi structures.
7650 */
7651 synchronize_net();
Michael Chanc0c050c2015-10-22 16:01:17 -04007652}
7653
7654static void bnxt_init_napi(struct bnxt *bp)
7655{
7656 int i;
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007657 unsigned int cp_nr_rings = bp->cp_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04007658 struct bnxt_napi *bnapi;
7659
7660 if (bp->flags & BNXT_FLAG_USING_MSIX) {
Michael Chan0fcec982018-10-14 07:02:58 -04007661 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7662
7663 if (bp->flags & BNXT_FLAG_CHIP_P5)
7664 poll_fn = bnxt_poll_p5;
7665 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007666 cp_nr_rings--;
7667 for (i = 0; i < cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007668 bnapi = bp->bnapi[i];
Michael Chan0fcec982018-10-14 07:02:58 -04007669 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04007670 }
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007671 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7672 bnapi = bp->bnapi[cp_nr_rings];
7673 netif_napi_add(bp->dev, &bnapi->napi,
7674 bnxt_poll_nitroa0, 64);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007675 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007676 } else {
7677 bnapi = bp->bnapi[0];
7678 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04007679 }
7680}
7681
7682static void bnxt_disable_napi(struct bnxt *bp)
7683{
7684 int i;
7685
7686 if (!bp->bnapi)
7687 return;
7688
Andy Gospodarek0bc0b972018-01-26 10:27:47 -05007689 for (i = 0; i < bp->cp_nr_rings; i++) {
7690 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7691
7692 if (bp->bnapi[i]->rx_ring)
7693 cancel_work_sync(&cpr->dim.work);
7694
Michael Chanc0c050c2015-10-22 16:01:17 -04007695 napi_disable(&bp->bnapi[i]->napi);
Andy Gospodarek0bc0b972018-01-26 10:27:47 -05007696 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007697}
7698
7699static void bnxt_enable_napi(struct bnxt *bp)
7700{
7701 int i;
7702
7703 for (i = 0; i < bp->cp_nr_rings; i++) {
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05007704 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
Michael Chanfa7e2812016-05-10 19:18:00 -04007705 bp->bnapi[i]->in_reset = false;
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05007706
7707 if (bp->bnapi[i]->rx_ring) {
7708 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7709 cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7710 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007711 napi_enable(&bp->bnapi[i]->napi);
7712 }
7713}
7714
Michael Chan7df4ae92016-12-02 21:17:17 -05007715void bnxt_tx_disable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007716{
7717 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04007718 struct bnxt_tx_ring_info *txr;
Michael Chanc0c050c2015-10-22 16:01:17 -04007719
Michael Chanb6ab4b02016-01-02 23:44:59 -05007720 if (bp->tx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007721 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05007722 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04007723 txr->dev_state = BNXT_DEV_STATE_CLOSING;
Michael Chanc0c050c2015-10-22 16:01:17 -04007724 }
7725 }
7726 /* Stop all TX queues */
7727 netif_tx_disable(bp->dev);
7728 netif_carrier_off(bp->dev);
7729}
7730
Michael Chan7df4ae92016-12-02 21:17:17 -05007731void bnxt_tx_enable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007732{
7733 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04007734 struct bnxt_tx_ring_info *txr;
Michael Chanc0c050c2015-10-22 16:01:17 -04007735
7736 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05007737 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04007738 txr->dev_state = 0;
7739 }
7740 netif_tx_wake_all_queues(bp->dev);
7741 if (bp->link_info.link_up)
7742 netif_carrier_on(bp->dev);
7743}
7744
7745static void bnxt_report_link(struct bnxt *bp)
7746{
7747 if (bp->link_info.link_up) {
7748 const char *duplex;
7749 const char *flow_ctrl;
Deepak Khungar38a21b32017-04-21 20:11:24 -04007750 u32 speed;
7751 u16 fec;
Michael Chanc0c050c2015-10-22 16:01:17 -04007752
7753 netif_carrier_on(bp->dev);
7754 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7755 duplex = "full";
7756 else
7757 duplex = "half";
7758 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7759 flow_ctrl = "ON - receive & transmit";
7760 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7761 flow_ctrl = "ON - transmit";
7762 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7763 flow_ctrl = "ON - receive";
7764 else
7765 flow_ctrl = "none";
7766 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
Deepak Khungar38a21b32017-04-21 20:11:24 -04007767 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04007768 speed, duplex, flow_ctrl);
Michael Chan170ce012016-04-05 14:08:57 -04007769 if (bp->flags & BNXT_FLAG_EEE_CAP)
7770 netdev_info(bp->dev, "EEE is %s\n",
7771 bp->eee.eee_active ? "active" :
7772 "not active");
Michael Chane70c7522017-02-12 19:18:16 -05007773 fec = bp->link_info.fec_cfg;
7774 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7775 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7776 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7777 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7778 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
Michael Chanc0c050c2015-10-22 16:01:17 -04007779 } else {
7780 netif_carrier_off(bp->dev);
7781 netdev_err(bp->dev, "NIC Link is Down\n");
7782 }
7783}
7784
Michael Chan170ce012016-04-05 14:08:57 -04007785static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7786{
7787 int rc = 0;
7788 struct hwrm_port_phy_qcaps_input req = {0};
7789 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan93ed8112016-06-13 02:25:37 -04007790 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chan170ce012016-04-05 14:08:57 -04007791
7792 if (bp->hwrm_spec_code < 0x10201)
7793 return 0;
7794
7795 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7796
7797 mutex_lock(&bp->hwrm_cmd_lock);
7798 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7799 if (rc)
7800 goto hwrm_phy_qcaps_exit;
7801
Michael Chanacb20052017-07-24 12:34:20 -04007802 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
Michael Chan170ce012016-04-05 14:08:57 -04007803 struct ethtool_eee *eee = &bp->eee;
7804 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7805
7806 bp->flags |= BNXT_FLAG_EEE_CAP;
7807 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7808 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7809 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7810 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7811 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7812 }
Michael Chan55fd0cf2018-08-05 16:51:48 -04007813 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7814 if (bp->test_info)
7815 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7816 }
Michael Chan520ad892017-03-08 18:44:35 -05007817 if (resp->supported_speeds_auto_mode)
7818 link_info->support_auto_speeds =
7819 le16_to_cpu(resp->supported_speeds_auto_mode);
Michael Chan170ce012016-04-05 14:08:57 -04007820
Michael Chand5430d32017-08-28 13:40:31 -04007821 bp->port_count = resp->port_cnt;
7822
Michael Chan170ce012016-04-05 14:08:57 -04007823hwrm_phy_qcaps_exit:
7824 mutex_unlock(&bp->hwrm_cmd_lock);
7825 return rc;
7826}
7827
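/* Query the current PHY state with HWRM_PORT_PHY_QCFG and refresh the cached
 * link_info (speed, duplex, pause, EEE, FEC).  When chng_link_state is set,
 * also update link_up and report carrier changes to the stack.
 */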
Michael Chanc0c050c2015-10-22 16:01:17 -04007828static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7829{
7830 int rc = 0;
7831 struct bnxt_link_info *link_info = &bp->link_info;
7832 struct hwrm_port_phy_qcfg_input req = {0};
7833 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7834 u8 link_up = link_info->link_up;
Michael Chan286ef9d2016-11-16 21:13:08 -05007835 u16 diff;
Michael Chanc0c050c2015-10-22 16:01:17 -04007836
7837 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7838
7839 mutex_lock(&bp->hwrm_cmd_lock);
7840 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7841 if (rc) {
7842 mutex_unlock(&bp->hwrm_cmd_lock);
7843 return rc;
7844 }
7845
7846 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7847 link_info->phy_link_status = resp->link;
Michael Chanacb20052017-07-24 12:34:20 -04007848 link_info->duplex = resp->duplex_cfg;
7849 if (bp->hwrm_spec_code >= 0x10800)
7850 link_info->duplex = resp->duplex_state;
Michael Chanc0c050c2015-10-22 16:01:17 -04007851 link_info->pause = resp->pause;
7852 link_info->auto_mode = resp->auto_mode;
7853 link_info->auto_pause_setting = resp->auto_pause;
Michael Chan32773602016-03-07 15:38:42 -05007854 link_info->lp_pause = resp->link_partner_adv_pause;
Michael Chanc0c050c2015-10-22 16:01:17 -04007855 link_info->force_pause_setting = resp->force_pause;
Michael Chanacb20052017-07-24 12:34:20 -04007856 link_info->duplex_setting = resp->duplex_cfg;
Michael Chanc0c050c2015-10-22 16:01:17 -04007857 if (link_info->phy_link_status == BNXT_LINK_LINK)
7858 link_info->link_speed = le16_to_cpu(resp->link_speed);
7859 else
7860 link_info->link_speed = 0;
7861 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
Michael Chanc0c050c2015-10-22 16:01:17 -04007862 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7863 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
Michael Chan32773602016-03-07 15:38:42 -05007864 link_info->lp_auto_link_speeds =
7865 le16_to_cpu(resp->link_partner_adv_speeds);
Michael Chanc0c050c2015-10-22 16:01:17 -04007866 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7867 link_info->phy_ver[0] = resp->phy_maj;
7868 link_info->phy_ver[1] = resp->phy_min;
7869 link_info->phy_ver[2] = resp->phy_bld;
7870 link_info->media_type = resp->media_type;
Michael Chan03efbec2016-04-11 04:11:11 -04007871 link_info->phy_type = resp->phy_type;
Michael Chan11f15ed2016-04-05 14:08:55 -04007872 link_info->transceiver = resp->xcvr_pkg_type;
Michael Chan170ce012016-04-05 14:08:57 -04007873 link_info->phy_addr = resp->eee_config_phy_addr &
7874 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
Ajit Khaparde42ee18f2016-05-15 03:04:44 -04007875 link_info->module_status = resp->module_status;
Michael Chanc0c050c2015-10-22 16:01:17 -04007876
Michael Chan170ce012016-04-05 14:08:57 -04007877 if (bp->flags & BNXT_FLAG_EEE_CAP) {
7878 struct ethtool_eee *eee = &bp->eee;
7879 u16 fw_speeds;
7880
7881 eee->eee_active = 0;
7882 if (resp->eee_config_phy_addr &
7883 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7884 eee->eee_active = 1;
7885 fw_speeds = le16_to_cpu(
7886 resp->link_partner_adv_eee_link_speed_mask);
7887 eee->lp_advertised =
7888 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7889 }
7890
7891 /* Pull initial EEE config */
7892 if (!chng_link_state) {
7893 if (resp->eee_config_phy_addr &
7894 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7895 eee->eee_enabled = 1;
7896
7897 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7898 eee->advertised =
7899 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7900
7901 if (resp->eee_config_phy_addr &
7902 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7903 __le32 tmr;
7904
7905 eee->tx_lpi_enabled = 1;
7906 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7907 eee->tx_lpi_timer = le32_to_cpu(tmr) &
7908 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7909 }
7910 }
7911 }
Michael Chane70c7522017-02-12 19:18:16 -05007912
7913 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7914 if (bp->hwrm_spec_code >= 0x10504)
7915 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7916
Michael Chanc0c050c2015-10-22 16:01:17 -04007917 /* TODO: need to add more logic to report VF link */
7918 if (chng_link_state) {
7919 if (link_info->phy_link_status == BNXT_LINK_LINK)
7920 link_info->link_up = 1;
7921 else
7922 link_info->link_up = 0;
7923 if (link_up != link_info->link_up)
7924 bnxt_report_link(bp);
7925 } else {
7926		/* always report link down if not required to update link state */
7927 link_info->link_up = 0;
7928 }
7929 mutex_unlock(&bp->hwrm_cmd_lock);
Michael Chan286ef9d2016-11-16 21:13:08 -05007930
Michael Chandac049072018-05-08 03:18:39 -04007931 if (!BNXT_SINGLE_PF(bp))
7932 return 0;
7933
Michael Chan286ef9d2016-11-16 21:13:08 -05007934 diff = link_info->support_auto_speeds ^ link_info->advertising;
7935 if ((link_info->support_auto_speeds | diff) !=
7936 link_info->support_auto_speeds) {
7937 /* An advertised speed is no longer supported, so we need to
Michael Chan0eaa24b2017-01-25 02:55:08 -05007938 * update the advertisement settings. Caller holds RTNL
7939 * so we can modify link settings.
Michael Chan286ef9d2016-11-16 21:13:08 -05007940 */
Michael Chan286ef9d2016-11-16 21:13:08 -05007941 link_info->advertising = link_info->support_auto_speeds;
Michael Chan0eaa24b2017-01-25 02:55:08 -05007942 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
Michael Chan286ef9d2016-11-16 21:13:08 -05007943 bnxt_hwrm_set_link_setting(bp, true, false);
Michael Chan286ef9d2016-11-16 21:13:08 -05007944 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007945 return 0;
7946}
7947
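/* Check the SFP+ module status after a link update and warn if the module is
 * unqualified, powered down, or has TX disabled.
 */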
Michael Chan10289be2016-05-15 03:04:49 -04007948static void bnxt_get_port_module_status(struct bnxt *bp)
7949{
7950 struct bnxt_link_info *link_info = &bp->link_info;
7951 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7952 u8 module_status;
7953
7954 if (bnxt_update_link(bp, true))
7955 return;
7956
7957 module_status = link_info->module_status;
7958 switch (module_status) {
7959 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7960 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7961 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7962 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7963 bp->pf.port_id);
7964 if (bp->hwrm_spec_code >= 0x10201) {
7965 netdev_warn(bp->dev, "Module part number %s\n",
7966 resp->phy_vendor_partnumber);
7967 }
7968 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7969 netdev_warn(bp->dev, "TX is disabled\n");
7970 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7971 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7972 }
7973}
7974
Michael Chanc0c050c2015-10-22 16:01:17 -04007975static void
7976bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7977{
7978 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
Michael Chanc9ee9512016-04-05 14:08:56 -04007979 if (bp->hwrm_spec_code >= 0x10201)
7980 req->auto_pause =
7981 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
Michael Chanc0c050c2015-10-22 16:01:17 -04007982 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7983 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7984 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
Michael Chan49b5c7a2016-03-28 19:46:06 -04007985 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
Michael Chanc0c050c2015-10-22 16:01:17 -04007986 req->enables |=
7987 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7988 } else {
7989 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7990 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7991 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7992 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7993 req->enables |=
7994 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
Michael Chanc9ee9512016-04-05 14:08:56 -04007995 if (bp->hwrm_spec_code >= 0x10201) {
7996 req->auto_pause = req->force_pause;
7997 req->enables |= cpu_to_le32(
7998 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7999 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008000 }
8001}
8002
8003static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8004 struct hwrm_port_phy_cfg_input *req)
8005{
8006 u8 autoneg = bp->link_info.autoneg;
8007 u16 fw_link_speed = bp->link_info.req_link_speed;
Michael Chan68515a12016-12-29 12:13:34 -05008008 u16 advertising = bp->link_info.advertising;
Michael Chanc0c050c2015-10-22 16:01:17 -04008009
8010 if (autoneg & BNXT_AUTONEG_SPEED) {
8011 req->auto_mode |=
Michael Chan11f15ed2016-04-05 14:08:55 -04008012 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04008013
8014 req->enables |= cpu_to_le32(
8015 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8016 req->auto_link_speed_mask = cpu_to_le16(advertising);
8017
8018 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8019 req->flags |=
8020 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8021 } else {
8022 req->force_link_speed = cpu_to_le16(fw_link_speed);
8023 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8024 }
8025
Michael Chanc0c050c2015-10-22 16:01:17 -04008026 /* tell chimp that the setting takes effect immediately */
8027 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8028}
8029
8030int bnxt_hwrm_set_pause(struct bnxt *bp)
8031{
8032 struct hwrm_port_phy_cfg_input req = {0};
8033 int rc;
8034
8035 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8036 bnxt_hwrm_set_pause_common(bp, &req);
8037
8038 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8039 bp->link_info.force_link_chng)
8040 bnxt_hwrm_set_link_common(bp, &req);
8041
8042 mutex_lock(&bp->hwrm_cmd_lock);
8043 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8044 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8045		/* since changing the pause setting doesn't trigger any link
8046		 * change event, the driver needs to update the current pause
8047		 * result upon successful return of the phy_cfg command
8048 */
8049 bp->link_info.pause =
8050 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8051 bp->link_info.auto_pause_setting = 0;
8052 if (!bp->link_info.force_link_chng)
8053 bnxt_report_link(bp);
8054 }
8055 bp->link_info.force_link_chng = false;
8056 mutex_unlock(&bp->hwrm_cmd_lock);
8057 return rc;
8058}
8059
Michael Chan939f7f02016-04-05 14:08:58 -04008060static void bnxt_hwrm_set_eee(struct bnxt *bp,
8061 struct hwrm_port_phy_cfg_input *req)
8062{
8063 struct ethtool_eee *eee = &bp->eee;
8064
8065 if (eee->eee_enabled) {
8066 u16 eee_speeds;
8067 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8068
8069 if (eee->tx_lpi_enabled)
8070 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8071 else
8072 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8073
8074 req->flags |= cpu_to_le32(flags);
8075 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8076 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8077 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8078 } else {
8079 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8080 }
8081}
8082
8083int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
Michael Chanc0c050c2015-10-22 16:01:17 -04008084{
8085 struct hwrm_port_phy_cfg_input req = {0};
8086
8087 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8088 if (set_pause)
8089 bnxt_hwrm_set_pause_common(bp, &req);
8090
8091 bnxt_hwrm_set_link_common(bp, &req);
Michael Chan939f7f02016-04-05 14:08:58 -04008092
8093 if (set_eee)
8094 bnxt_hwrm_set_eee(bp, &req);
Michael Chanc0c050c2015-10-22 16:01:17 -04008095 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8096}
8097
Michael Chan33f7d552016-04-11 04:11:12 -04008098static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8099{
8100 struct hwrm_port_phy_cfg_input req = {0};
8101
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04008102 if (!BNXT_SINGLE_PF(bp))
Michael Chan33f7d552016-04-11 04:11:12 -04008103 return 0;
8104
8105 if (pci_num_vf(bp->pdev))
8106 return 0;
8107
8108 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
Michael Chan16d663a2016-11-16 21:13:07 -05008109 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
Michael Chan33f7d552016-04-11 04:11:12 -04008110 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8111}
8112
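/* Tell firmware that the interface is going up or down via
 * HWRM_FUNC_DRV_IF_CHANGE.  If firmware reports that resources changed while
 * the interface was down, re-query the resource caps and clear the cached
 * reservations so they are re-negotiated.
 */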
Michael Chan25e1acd2018-08-05 16:51:55 -04008113static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8114{
8115 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8116 struct hwrm_func_drv_if_change_input req = {0};
8117 bool resc_reinit = false;
8118 int rc;
8119
8120 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8121 return 0;
8122
8123 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8124 if (up)
8125 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8126 mutex_lock(&bp->hwrm_cmd_lock);
8127 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8128 if (!rc && (resp->flags &
8129 cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
8130 resc_reinit = true;
8131 mutex_unlock(&bp->hwrm_cmd_lock);
8132
8133 if (up && resc_reinit && BNXT_NEW_RM(bp)) {
8134 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8135
8136 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8137 hw_resc->resv_cp_rings = 0;
Vasundhara Volam780baad2018-12-16 18:46:23 -05008138 hw_resc->resv_stat_ctxs = 0;
Michael Chan75720e62018-12-09 07:01:00 -05008139 hw_resc->resv_irqs = 0;
Michael Chan25e1acd2018-08-05 16:51:55 -04008140 hw_resc->resv_tx_rings = 0;
8141 hw_resc->resv_rx_rings = 0;
8142 hw_resc->resv_hw_ring_grps = 0;
8143 hw_resc->resv_vnics = 0;
Michael Chan6b95c3e2018-09-03 04:23:17 -04008144 bp->tx_nr_rings = 0;
8145 bp->rx_nr_rings = 0;
Michael Chan25e1acd2018-08-05 16:51:55 -04008146 }
8147 return rc;
8148}
8149
Michael Chan5ad2cbe2017-01-13 01:32:03 -05008150static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8151{
8152 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8153 struct hwrm_port_led_qcaps_input req = {0};
8154 struct bnxt_pf_info *pf = &bp->pf;
8155 int rc;
8156
8157 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8158 return 0;
8159
8160 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8161 req.port_id = cpu_to_le16(pf->port_id);
8162 mutex_lock(&bp->hwrm_cmd_lock);
8163 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8164 if (rc) {
8165 mutex_unlock(&bp->hwrm_cmd_lock);
8166 return rc;
8167 }
8168 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8169 int i;
8170
8171 bp->num_leds = resp->num_leds;
8172 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8173 bp->num_leds);
8174 for (i = 0; i < bp->num_leds; i++) {
8175 struct bnxt_led_info *led = &bp->leds[i];
8176 __le16 caps = led->led_state_caps;
8177
8178 if (!led->led_group_id ||
8179 !BNXT_LED_ALT_BLINK_CAP(caps)) {
8180 bp->num_leds = 0;
8181 break;
8182 }
8183 }
8184 }
8185 mutex_unlock(&bp->hwrm_cmd_lock);
8186 return 0;
8187}
8188
Michael Chan5282db62017-04-04 18:14:10 -04008189int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8190{
8191 struct hwrm_wol_filter_alloc_input req = {0};
8192 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8193 int rc;
8194
8195 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8196 req.port_id = cpu_to_le16(bp->pf.port_id);
8197 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8198 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8199 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8200 mutex_lock(&bp->hwrm_cmd_lock);
8201 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8202 if (!rc)
8203 bp->wol_filter_id = resp->wol_filter_id;
8204 mutex_unlock(&bp->hwrm_cmd_lock);
8205 return rc;
8206}
8207
8208int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8209{
8210 struct hwrm_wol_filter_free_input req = {0};
8211 int rc;
8212
8213 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8214 req.port_id = cpu_to_le16(bp->pf.port_id);
8215 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8216 req.wol_filter_id = bp->wol_filter_id;
8217 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8218 return rc;
8219}
8220
Michael Chanc1ef1462017-04-04 18:14:07 -04008221static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8222{
8223 struct hwrm_wol_filter_qcfg_input req = {0};
8224 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8225 u16 next_handle = 0;
8226 int rc;
8227
8228 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8229 req.port_id = cpu_to_le16(bp->pf.port_id);
8230 req.handle = cpu_to_le16(handle);
8231 mutex_lock(&bp->hwrm_cmd_lock);
8232 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8233 if (!rc) {
8234 next_handle = le16_to_cpu(resp->next_handle);
8235 if (next_handle != 0) {
8236 if (resp->wol_type ==
8237 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8238 bp->wol = 1;
8239 bp->wol_filter_id = resp->wol_filter_id;
8240 }
8241 }
8242 }
8243 mutex_unlock(&bp->hwrm_cmd_lock);
8244 return next_handle;
8245}
8246
8247static void bnxt_get_wol_settings(struct bnxt *bp)
8248{
8249 u16 handle = 0;
8250
8251 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8252 return;
8253
8254 do {
8255 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8256 } while (handle && handle != 0xffff);
8257}
8258
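/* Expose the chip temperature through hwmon when CONFIG_BNXT_HWMON is set;
 * the reading comes from HWRM_TEMP_MONITOR_QUERY and is reported in
 * millidegrees Celsius.
 */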
Vasundhara Volamcde49a42018-08-05 16:51:56 -04008259#ifdef CONFIG_BNXT_HWMON
8260static ssize_t bnxt_show_temp(struct device *dev,
8261 struct device_attribute *devattr, char *buf)
8262{
8263 struct hwrm_temp_monitor_query_input req = {0};
8264 struct hwrm_temp_monitor_query_output *resp;
8265 struct bnxt *bp = dev_get_drvdata(dev);
8266 u32 temp = 0;
8267
8268 resp = bp->hwrm_cmd_resp_addr;
8269 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8270 mutex_lock(&bp->hwrm_cmd_lock);
8271 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8272 temp = resp->temp * 1000; /* display millidegree */
8273 mutex_unlock(&bp->hwrm_cmd_lock);
8274
8275 return sprintf(buf, "%u\n", temp);
8276}
8277static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8278
8279static struct attribute *bnxt_attrs[] = {
8280 &sensor_dev_attr_temp1_input.dev_attr.attr,
8281 NULL
8282};
8283ATTRIBUTE_GROUPS(bnxt);
8284
8285static void bnxt_hwmon_close(struct bnxt *bp)
8286{
8287 if (bp->hwmon_dev) {
8288 hwmon_device_unregister(bp->hwmon_dev);
8289 bp->hwmon_dev = NULL;
8290 }
8291}
8292
8293static void bnxt_hwmon_open(struct bnxt *bp)
8294{
8295 struct pci_dev *pdev = bp->pdev;
8296
8297 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8298 DRV_MODULE_NAME, bp,
8299 bnxt_groups);
8300 if (IS_ERR(bp->hwmon_dev)) {
8301 bp->hwmon_dev = NULL;
8302 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8303 }
8304}
8305#else
8306static void bnxt_hwmon_close(struct bnxt *bp)
8307{
8308}
8309
8310static void bnxt_hwmon_open(struct bnxt *bp)
8311{
8312}
8313#endif
8314
Michael Chan939f7f02016-04-05 14:08:58 -04008315static bool bnxt_eee_config_ok(struct bnxt *bp)
8316{
8317 struct ethtool_eee *eee = &bp->eee;
8318 struct bnxt_link_info *link_info = &bp->link_info;
8319
8320 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8321 return true;
8322
8323 if (eee->eee_enabled) {
8324 u32 advertising =
8325 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8326
8327 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8328 eee->eee_enabled = 0;
8329 return false;
8330 }
8331 if (eee->advertised & ~advertising) {
8332 eee->advertised = advertising & eee->supported;
8333 return false;
8334 }
8335 }
8336 return true;
8337}
8338
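/* Re-apply the driver's requested link parameters after querying the current
 * PHY state: re-issue PHY_CFG if the speed, duplex, autoneg, pause or EEE
 * settings no longer match what was requested, or if the link was left shut
 * down by the last close.
 */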
Michael Chanc0c050c2015-10-22 16:01:17 -04008339static int bnxt_update_phy_setting(struct bnxt *bp)
8340{
8341 int rc;
8342 bool update_link = false;
8343 bool update_pause = false;
Michael Chan939f7f02016-04-05 14:08:58 -04008344 bool update_eee = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04008345 struct bnxt_link_info *link_info = &bp->link_info;
8346
8347 rc = bnxt_update_link(bp, true);
8348 if (rc) {
8349 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8350 rc);
8351 return rc;
8352 }
Michael Chan33dac242017-02-12 19:18:15 -05008353 if (!BNXT_SINGLE_PF(bp))
8354 return 0;
8355
Michael Chanc0c050c2015-10-22 16:01:17 -04008356 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
Michael Chanc9ee9512016-04-05 14:08:56 -04008357 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8358 link_info->req_flow_ctrl)
Michael Chanc0c050c2015-10-22 16:01:17 -04008359 update_pause = true;
8360 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8361 link_info->force_pause_setting != link_info->req_flow_ctrl)
8362 update_pause = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04008363 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8364 if (BNXT_AUTO_MODE(link_info->auto_mode))
8365 update_link = true;
8366 if (link_info->req_link_speed != link_info->force_link_speed)
8367 update_link = true;
Michael Chande730182016-02-19 19:43:20 -05008368 if (link_info->req_duplex != link_info->duplex_setting)
8369 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04008370 } else {
8371 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8372 update_link = true;
8373 if (link_info->advertising != link_info->auto_link_speeds)
8374 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04008375 }
8376
Michael Chan16d663a2016-11-16 21:13:07 -05008377	/* The last close may have shut down the link, so we need to call
8378 * PHY_CFG to bring it back up.
8379 */
8380 if (!netif_carrier_ok(bp->dev))
8381 update_link = true;
8382
Michael Chan939f7f02016-04-05 14:08:58 -04008383 if (!bnxt_eee_config_ok(bp))
8384 update_eee = true;
8385
Michael Chanc0c050c2015-10-22 16:01:17 -04008386 if (update_link)
Michael Chan939f7f02016-04-05 14:08:58 -04008387 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
Michael Chanc0c050c2015-10-22 16:01:17 -04008388 else if (update_pause)
8389 rc = bnxt_hwrm_set_pause(bp);
8390 if (rc) {
8391 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8392 rc);
8393 return rc;
8394 }
8395
8396 return rc;
8397}
8398
Jeffrey Huang11809492015-11-05 16:25:49 -05008399/* Common routine to pre-map certain register blocks to a different GRC window.
8400 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
8401 * in the PF and 3 windows in the VF can be customized to map different
8402 * register blocks.
8403 */
8404static void bnxt_preset_reg_win(struct bnxt *bp)
8405{
8406 if (BNXT_PF(bp)) {
8407 /* CAG registers map to GRC window #4 */
8408 writel(BNXT_CAG_REG_BASE,
8409 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8410 }
8411}
8412
Michael Chan47558ac2018-04-26 17:44:44 -04008413static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8414
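/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the chip, update PHY settings, refresh UDP tunnel ports, and
 * finally enable interrupts and the TX queues.  irq_re_init and link_re_init
 * control whether IRQs and link settings are re-initialized.
 */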
Michael Chanc0c050c2015-10-22 16:01:17 -04008415static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8416{
8417 int rc = 0;
8418
Jeffrey Huang11809492015-11-05 16:25:49 -05008419 bnxt_preset_reg_win(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008420 netif_carrier_off(bp->dev);
8421 if (irq_re_init) {
Michael Chan47558ac2018-04-26 17:44:44 -04008422 /* Reserve rings now if none were reserved at driver probe. */
8423 rc = bnxt_init_dflt_ring_mode(bp);
8424 if (rc) {
8425 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8426 return rc;
8427 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008428 }
Michael Chan41e8d792018-10-14 07:02:48 -04008429 rc = bnxt_reserve_rings(bp);
8430 if (rc)
8431 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008432 if ((bp->flags & BNXT_FLAG_RFS) &&
8433 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8434 /* disable RFS if falling back to INTA */
8435 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8436 bp->flags &= ~BNXT_FLAG_RFS;
8437 }
8438
8439 rc = bnxt_alloc_mem(bp, irq_re_init);
8440 if (rc) {
8441 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8442 goto open_err_free_mem;
8443 }
8444
8445 if (irq_re_init) {
8446 bnxt_init_napi(bp);
8447 rc = bnxt_request_irq(bp);
8448 if (rc) {
8449 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
Vikas Guptac58387a2018-07-09 02:24:52 -04008450 goto open_err_irq;
Michael Chanc0c050c2015-10-22 16:01:17 -04008451 }
8452 }
8453
8454 bnxt_enable_napi(bp);
Andy Gospodarekcabfb092018-04-26 17:44:40 -04008455 bnxt_debug_dev_init(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008456
8457 rc = bnxt_init_nic(bp, irq_re_init);
8458 if (rc) {
8459 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8460 goto open_err;
8461 }
8462
8463 if (link_re_init) {
Michael Chane2dc9b62017-10-13 21:09:30 -04008464 mutex_lock(&bp->link_lock);
Michael Chanc0c050c2015-10-22 16:01:17 -04008465 rc = bnxt_update_phy_setting(bp);
Michael Chane2dc9b62017-10-13 21:09:30 -04008466 mutex_unlock(&bp->link_lock);
Michael Chana1ef4a72018-08-05 16:51:49 -04008467 if (rc) {
Michael Chanba41d462016-02-19 19:43:21 -05008468 netdev_warn(bp->dev, "failed to update phy settings\n");
Michael Chana1ef4a72018-08-05 16:51:49 -04008469 if (BNXT_SINGLE_PF(bp)) {
8470 bp->link_info.phy_retry = true;
8471 bp->link_info.phy_retry_expires =
8472 jiffies + 5 * HZ;
8473 }
8474 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008475 }
8476
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07008477 if (irq_re_init)
Alexander Duyckad51b8e2016-06-16 12:21:19 -07008478 udp_tunnel_get_rx_info(bp->dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04008479
Michael Chancaefe522015-12-09 19:35:42 -05008480 set_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04008481 bnxt_enable_int(bp);
8482 /* Enable TX queues */
8483 bnxt_tx_enable(bp);
8484 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan10289be2016-05-15 03:04:49 -04008485 /* Poll link status and check for SFP+ module status */
8486 bnxt_get_port_module_status(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008487
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04008488 /* VF-reps may need to be re-opened after the PF is re-opened */
8489 if (BNXT_PF(bp))
8490 bnxt_vf_reps_open(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008491 return 0;
8492
8493open_err:
Andy Gospodarekcabfb092018-04-26 17:44:40 -04008494 bnxt_debug_dev_exit(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008495 bnxt_disable_napi(bp);
Vikas Guptac58387a2018-07-09 02:24:52 -04008496
8497open_err_irq:
Michael Chanc0c050c2015-10-22 16:01:17 -04008498 bnxt_del_napi(bp);
8499
8500open_err_free_mem:
8501 bnxt_free_skbs(bp);
8502 bnxt_free_irq(bp);
8503 bnxt_free_mem(bp, true);
8504 return rc;
8505}
8506
8507/* rtnl_lock held */
8508int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8509{
8510 int rc = 0;
8511
8512 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8513 if (rc) {
8514 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8515 dev_close(bp->dev);
8516 }
8517 return rc;
8518}
8519
Michael Chanf7dc1ea2017-04-04 18:14:13 -04008520/* rtnl_lock held, open the NIC halfway by allocating all resources, but
8521 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
8522 * self tests.
8523 */
8524int bnxt_half_open_nic(struct bnxt *bp)
8525{
8526 int rc = 0;
8527
8528 rc = bnxt_alloc_mem(bp, false);
8529 if (rc) {
8530 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8531 goto half_open_err;
8532 }
8533 rc = bnxt_init_nic(bp, false);
8534 if (rc) {
8535 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8536 goto half_open_err;
8537 }
8538 return 0;
8539
8540half_open_err:
8541 bnxt_free_skbs(bp);
8542 bnxt_free_mem(bp, false);
8543 dev_close(bp->dev);
8544 return rc;
8545}
8546
8547/* rtnl_lock held, this call can only be made after a previous successful
8548 * call to bnxt_half_open_nic().
8549 */
8550void bnxt_half_close_nic(struct bnxt *bp)
8551{
8552 bnxt_hwrm_resource_free(bp, false, false);
8553 bnxt_free_skbs(bp);
8554 bnxt_free_mem(bp, false);
8555}
8556
Michael Chanc0c050c2015-10-22 16:01:17 -04008557static int bnxt_open(struct net_device *dev)
8558{
8559 struct bnxt *bp = netdev_priv(dev);
Michael Chan25e1acd2018-08-05 16:51:55 -04008560 int rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008561
Michael Chan25e1acd2018-08-05 16:51:55 -04008562 bnxt_hwrm_if_change(bp, true);
8563 rc = __bnxt_open_nic(bp, true, true);
8564 if (rc)
8565 bnxt_hwrm_if_change(bp, false);
Vasundhara Volamcde49a42018-08-05 16:51:56 -04008566
8567 bnxt_hwmon_open(bp);
8568
Michael Chan25e1acd2018-08-05 16:51:55 -04008569 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008570}
8571
Michael Chanf9b76eb2017-07-11 13:05:34 -04008572static bool bnxt_drv_busy(struct bnxt *bp)
8573{
8574 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8575 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8576}
8577
Michael Chanb8875ca2018-12-16 18:46:29 -05008578static void bnxt_get_ring_stats(struct bnxt *bp,
8579 struct rtnl_link_stats64 *stats);
8580
Michael Chan86e953d2018-01-17 03:21:04 -05008581static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8582 bool link_re_init)
Michael Chanc0c050c2015-10-22 16:01:17 -04008583{
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04008584 /* Close the VF-reps before closing PF */
8585 if (BNXT_PF(bp))
8586 bnxt_vf_reps_close(bp);
Michael Chan86e953d2018-01-17 03:21:04 -05008587
Michael Chanc0c050c2015-10-22 16:01:17 -04008588	/* Change device state to avoid TX queue wake-ups */
8589 bnxt_tx_disable(bp);
8590
Michael Chancaefe522015-12-09 19:35:42 -05008591 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chan4cebdce2015-12-09 19:35:43 -05008592 smp_mb__after_atomic();
Michael Chanf9b76eb2017-07-11 13:05:34 -04008593 while (bnxt_drv_busy(bp))
Michael Chan4cebdce2015-12-09 19:35:43 -05008594 msleep(20);
Michael Chanc0c050c2015-10-22 16:01:17 -04008595
Michael Chan9d8bc092016-12-29 12:13:33 -05008596	/* Flush rings and disable interrupts */
Michael Chanc0c050c2015-10-22 16:01:17 -04008597 bnxt_shutdown_nic(bp, irq_re_init);
8598
8599 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8600
Andy Gospodarekcabfb092018-04-26 17:44:40 -04008601 bnxt_debug_dev_exit(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008602 bnxt_disable_napi(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008603 del_timer_sync(&bp->timer);
8604 bnxt_free_skbs(bp);
8605
Michael Chanb8875ca2018-12-16 18:46:29 -05008606 /* Save ring stats before shutdown */
8607 if (bp->bnapi)
8608 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
Michael Chanc0c050c2015-10-22 16:01:17 -04008609 if (irq_re_init) {
8610 bnxt_free_irq(bp);
8611 bnxt_del_napi(bp);
8612 }
8613 bnxt_free_mem(bp, irq_re_init);
Michael Chan86e953d2018-01-17 03:21:04 -05008614}
8615
8616int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8617{
8618 int rc = 0;
8619
8620#ifdef CONFIG_BNXT_SRIOV
8621 if (bp->sriov_cfg) {
8622 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8623 !bp->sriov_cfg,
8624 BNXT_SRIOV_CFG_WAIT_TMO);
8625 if (rc)
8626 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8627 }
8628#endif
8629 __bnxt_close_nic(bp, irq_re_init, link_re_init);
Michael Chanc0c050c2015-10-22 16:01:17 -04008630 return rc;
8631}
8632
8633static int bnxt_close(struct net_device *dev)
8634{
8635 struct bnxt *bp = netdev_priv(dev);
8636
Vasundhara Volamcde49a42018-08-05 16:51:56 -04008637 bnxt_hwmon_close(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008638 bnxt_close_nic(bp, true, true);
Michael Chan33f7d552016-04-11 04:11:12 -04008639 bnxt_hwrm_shutdown_link(bp);
Michael Chan25e1acd2018-08-05 16:51:55 -04008640 bnxt_hwrm_if_change(bp, false);
Michael Chanc0c050c2015-10-22 16:01:17 -04008641 return 0;
8642}
8643
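/* Read a PHY register over MDIO by sending the HWRM_PORT_PHY_MDIO_READ
 * command to the firmware.  Clause 45 addressing is used when the port
 * supports 10Gb link speeds; otherwise the register address is masked to
 * the 5-bit Clause 22 range.  Requires HWRM spec 1.10.0 or newer.
 */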
Vasundhara Volam0ca12be2019-02-19 05:31:15 -05008644static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
8645 u16 *val)
8646{
8647 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
8648 struct hwrm_port_phy_mdio_read_input req = {0};
8649 int rc;
8650
8651 if (bp->hwrm_spec_code < 0x10a00)
8652 return -EOPNOTSUPP;
8653
8654 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
8655 req.port_id = cpu_to_le16(bp->pf.port_id);
8656 req.phy_addr = phy_addr;
8657 req.reg_addr = cpu_to_le16(reg & 0x1f);
8658 if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
8659 req.cl45_mdio = 1;
8660 req.phy_addr = mdio_phy_id_prtad(phy_addr);
8661 req.dev_addr = mdio_phy_id_devad(phy_addr);
8662 req.reg_addr = cpu_to_le16(reg);
8663 }
8664
8665 mutex_lock(&bp->hwrm_cmd_lock);
8666 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8667 if (!rc)
8668 *val = le16_to_cpu(resp->reg_data);
8669 mutex_unlock(&bp->hwrm_cmd_lock);
8670 return rc;
8671}
8672
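/* Write a PHY register over MDIO via HWRM_PORT_PHY_MDIO_WRITE, using the
 * same Clause 22 / Clause 45 addressing rules as bnxt_hwrm_port_phy_read().
 */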
8673static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
8674 u16 val)
8675{
8676 struct hwrm_port_phy_mdio_write_input req = {0};
8677
8678 if (bp->hwrm_spec_code < 0x10a00)
8679 return -EOPNOTSUPP;
8680
8681 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
8682 req.port_id = cpu_to_le16(bp->pf.port_id);
8683 req.phy_addr = phy_addr;
8684 req.reg_addr = cpu_to_le16(reg & 0x1f);
8685 if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
8686 req.cl45_mdio = 1;
8687 req.phy_addr = mdio_phy_id_prtad(phy_addr);
8688 req.dev_addr = mdio_phy_id_devad(phy_addr);
8689 req.reg_addr = cpu_to_le16(reg);
8690 }
8691 req.reg_data = cpu_to_le16(val);
8692
8693 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8694}
8695
Michael Chanc0c050c2015-10-22 16:01:17 -04008696/* rtnl_lock held */
8697static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8698{
Vasundhara Volam0ca12be2019-02-19 05:31:15 -05008699 struct mii_ioctl_data *mdio = if_mii(ifr);
8700 struct bnxt *bp = netdev_priv(dev);
8701 int rc;
8702
Michael Chanc0c050c2015-10-22 16:01:17 -04008703 switch (cmd) {
8704 case SIOCGMIIPHY:
Vasundhara Volam0ca12be2019-02-19 05:31:15 -05008705 mdio->phy_id = bp->link_info.phy_addr;
8706
Michael Chanc0c050c2015-10-22 16:01:17 -04008707 /* fallthru */
8708 case SIOCGMIIREG: {
Vasundhara Volam0ca12be2019-02-19 05:31:15 -05008709 u16 mii_regval = 0;
8710
Michael Chanc0c050c2015-10-22 16:01:17 -04008711 if (!netif_running(dev))
8712 return -EAGAIN;
8713
Vasundhara Volam0ca12be2019-02-19 05:31:15 -05008714 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
8715 &mii_regval);
8716 mdio->val_out = mii_regval;
8717 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008718 }
8719
8720 case SIOCSMIIREG:
8721 if (!netif_running(dev))
8722 return -EAGAIN;
8723
Vasundhara Volam0ca12be2019-02-19 05:31:15 -05008724 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
8725 mdio->val_in);
Michael Chanc0c050c2015-10-22 16:01:17 -04008726
8727 default:
8728 /* do nothing */
8729 break;
8730 }
8731 return -EOPNOTSUPP;
8732}
8733
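/* Accumulate the per-completion-ring hardware counters (unicast, multicast
 * and broadcast packets and bytes, RX discards and TX drops) into the
 * standard rtnl_link_stats64 structure.
 */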
Michael Chanb8875ca2018-12-16 18:46:29 -05008734static void bnxt_get_ring_stats(struct bnxt *bp,
8735 struct rtnl_link_stats64 *stats)
Michael Chanc0c050c2015-10-22 16:01:17 -04008736{
Michael Chanb8875ca2018-12-16 18:46:29 -05008737 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04008738
Michael Chanc0c050c2015-10-22 16:01:17 -04008739
Michael Chanc0c050c2015-10-22 16:01:17 -04008740 for (i = 0; i < bp->cp_nr_rings; i++) {
8741 struct bnxt_napi *bnapi = bp->bnapi[i];
8742 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8743 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8744
8745 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8746 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8747 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8748
8749 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8750 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8751 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8752
8753 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8754 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8755 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8756
8757 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8758 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8759 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8760
8761 stats->rx_missed_errors +=
8762 le64_to_cpu(hw_stats->rx_discard_pkts);
8763
8764 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8765
Michael Chanc0c050c2015-10-22 16:01:17 -04008766 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8767 }
Michael Chanb8875ca2018-12-16 18:46:29 -05008768}
8769
8770static void bnxt_add_prev_stats(struct bnxt *bp,
8771 struct rtnl_link_stats64 *stats)
8772{
8773 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
8774
8775 stats->rx_packets += prev_stats->rx_packets;
8776 stats->tx_packets += prev_stats->tx_packets;
8777 stats->rx_bytes += prev_stats->rx_bytes;
8778 stats->tx_bytes += prev_stats->tx_bytes;
8779 stats->rx_missed_errors += prev_stats->rx_missed_errors;
8780 stats->multicast += prev_stats->multicast;
8781 stats->tx_dropped += prev_stats->tx_dropped;
8782}
8783
8784static void
8785bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8786{
8787 struct bnxt *bp = netdev_priv(dev);
8788
8789 set_bit(BNXT_STATE_READ_STATS, &bp->state);
8790 /* Make sure bnxt_close_nic() sees that we are reading stats before
8791 * we check the BNXT_STATE_OPEN flag.
8792 */
8793 smp_mb__after_atomic();
8794 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8795 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8796 *stats = bp->net_stats_prev;
8797 return;
8798 }
8799
8800 bnxt_get_ring_stats(bp, stats);
8801 bnxt_add_prev_stats(bp, stats);
Michael Chanc0c050c2015-10-22 16:01:17 -04008802
Michael Chan9947f832016-03-07 15:38:46 -05008803 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8804 struct rx_port_stats *rx = bp->hw_rx_port_stats;
8805 struct tx_port_stats *tx = bp->hw_tx_port_stats;
8806
8807 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8808 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8809 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8810 le64_to_cpu(rx->rx_ovrsz_frames) +
8811 le64_to_cpu(rx->rx_runt_frames);
8812 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8813 le64_to_cpu(rx->rx_jbr_frames);
8814 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8815 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8816 stats->tx_errors = le64_to_cpu(tx->tx_err);
8817 }
Michael Chanf9b76eb2017-07-11 13:05:34 -04008818 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04008819}
8820
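/* Copy the device's multicast list into the default VNIC's mc_list and
 * return true if it has changed.  If there are more addresses than
 * BNXT_MAX_MC_ADDRS, fall back to the ALL_MCAST RX mask instead.
 */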
8821static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8822{
8823 struct net_device *dev = bp->dev;
8824 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8825 struct netdev_hw_addr *ha;
8826 u8 *haddr;
8827 int mc_count = 0;
8828 bool update = false;
8829 int off = 0;
8830
8831 netdev_for_each_mc_addr(ha, dev) {
8832 if (mc_count >= BNXT_MAX_MC_ADDRS) {
8833 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8834 vnic->mc_list_count = 0;
8835 return false;
8836 }
8837 haddr = ha->addr;
8838 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8839 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8840 update = true;
8841 }
8842 off += ETH_ALEN;
8843 mc_count++;
8844 }
8845 if (mc_count)
8846 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8847
8848 if (mc_count != vnic->mc_list_count) {
8849 vnic->mc_list_count = mc_count;
8850 update = true;
8851 }
8852 return update;
8853}
8854
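/* Return true if the device's unicast address list differs from the
 * unicast filters currently programmed in the default VNIC.
 */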
8855static bool bnxt_uc_list_updated(struct bnxt *bp)
8856{
8857 struct net_device *dev = bp->dev;
8858 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8859 struct netdev_hw_addr *ha;
8860 int off = 0;
8861
8862 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8863 return true;
8864
8865 netdev_for_each_uc_addr(ha, dev) {
8866 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8867 return true;
8868
8869 off += ETH_ALEN;
8870 }
8871 return false;
8872}
8873
8874static void bnxt_set_rx_mode(struct net_device *dev)
8875{
8876 struct bnxt *bp = netdev_priv(dev);
8877 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8878 u32 mask = vnic->rx_mask;
8879 bool mc_update = false;
8880 bool uc_update;
8881
8882 if (!netif_running(dev))
8883 return;
8884
8885 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8886 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
Michael Chan30e33842018-07-09 02:24:50 -04008887 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8888 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
Michael Chanc0c050c2015-10-22 16:01:17 -04008889
Michael Chan17c71ac2016-07-01 18:46:27 -04008890 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04008891 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8892
8893 uc_update = bnxt_uc_list_updated(bp);
8894
Michael Chan30e33842018-07-09 02:24:50 -04008895 if (dev->flags & IFF_BROADCAST)
8896 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
Michael Chanc0c050c2015-10-22 16:01:17 -04008897 if (dev->flags & IFF_ALLMULTI) {
8898 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8899 vnic->mc_list_count = 0;
8900 } else {
8901 mc_update = bnxt_mc_list_updated(bp, &mask);
8902 }
8903
8904 if (mask != vnic->rx_mask || uc_update || mc_update) {
8905 vnic->rx_mask = mask;
8906
8907 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04008908 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008909 }
8910}
8911
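/* Reprogram the unicast filters and the RX mask in the default VNIC.
 * Falls back to promiscuous mode when there are more unicast addresses
 * than available filters, and to ALL_MCAST if the multicast setup fails.
 */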
Michael Chanb664f002015-12-02 01:54:08 -05008912static int bnxt_cfg_rx_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04008913{
8914 struct net_device *dev = bp->dev;
8915 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8916 struct netdev_hw_addr *ha;
8917 int i, off = 0, rc;
8918 bool uc_update;
8919
8920 netif_addr_lock_bh(dev);
8921 uc_update = bnxt_uc_list_updated(bp);
8922 netif_addr_unlock_bh(dev);
8923
8924 if (!uc_update)
8925 goto skip_uc;
8926
8927 mutex_lock(&bp->hwrm_cmd_lock);
8928 for (i = 1; i < vnic->uc_filter_count; i++) {
8929 struct hwrm_cfa_l2_filter_free_input req = {0};
8930
8931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8932 -1);
8933
8934 req.l2_filter_id = vnic->fw_l2_filter_id[i];
8935
8936 rc = _hwrm_send_message(bp, &req, sizeof(req),
8937 HWRM_CMD_TIMEOUT);
8938 }
8939 mutex_unlock(&bp->hwrm_cmd_lock);
8940
8941 vnic->uc_filter_count = 1;
8942
8943 netif_addr_lock_bh(dev);
8944 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8945 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8946 } else {
8947 netdev_for_each_uc_addr(ha, dev) {
8948 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8949 off += ETH_ALEN;
8950 vnic->uc_filter_count++;
8951 }
8952 }
8953 netif_addr_unlock_bh(dev);
8954
8955 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8956 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8957 if (rc) {
8958 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8959 rc);
8960 vnic->uc_filter_count = i;
Michael Chanb664f002015-12-02 01:54:08 -05008961 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008962 }
8963 }
8964
8965skip_uc:
8966 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
Michael Chanb4e30e82019-04-25 22:31:50 -04008967 if (rc && vnic->mc_list_count) {
8968 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
8969 rc);
8970 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8971 vnic->mc_list_count = 0;
8972 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8973 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008974 if (rc)
Michael Chanb4e30e82019-04-25 22:31:50 -04008975 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04008976 rc);
Michael Chanb664f002015-12-02 01:54:08 -05008977
8978 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008979}
8980
Michael Chan2773dfb2018-04-26 17:44:42 -04008981static bool bnxt_can_reserve_rings(struct bnxt *bp)
8982{
8983#ifdef CONFIG_BNXT_SRIOV
Michael Chanf1ca94d2018-08-05 16:51:53 -04008984 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
Michael Chan2773dfb2018-04-26 17:44:42 -04008985 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8986
8987 /* No minimum rings were provisioned by the PF. Don't
8988 * reserve rings by default when device is down.
8989 */
8990 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8991 return true;
8992
8993 if (!netif_running(bp->dev))
8994 return false;
8995 }
8996#endif
8997 return true;
8998}
8999
Michael Chan8079e8f2016-12-29 12:13:37 -05009000/* If the chip and firmware support RFS */
9001static bool bnxt_rfs_supported(struct bnxt *bp)
9002{
Michael Chan41e8d792018-10-14 07:02:48 -04009003 if (bp->flags & BNXT_FLAG_CHIP_P5)
9004 return false;
Michael Chan8079e8f2016-12-29 12:13:37 -05009005 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9006 return true;
Michael Chanae10ae72016-12-29 12:13:38 -05009007 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9008 return true;
Michael Chan8079e8f2016-12-29 12:13:37 -05009009 return false;
9010}
9011
9012/* If runtime conditions support RFS */
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009013static bool bnxt_rfs_capable(struct bnxt *bp)
9014{
9015#ifdef CONFIG_RFS_ACCEL
Michael Chan8079e8f2016-12-29 12:13:37 -05009016 int vnics, max_vnics, max_rss_ctxs;
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009017
Michael Chan41e8d792018-10-14 07:02:48 -04009018 if (bp->flags & BNXT_FLAG_CHIP_P5)
9019 return false;
Michael Chan2773dfb2018-04-26 17:44:42 -04009020 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009021 return false;
9022
9023 vnics = 1 + bp->rx_nr_rings;
Michael Chan8079e8f2016-12-29 12:13:37 -05009024 max_vnics = bnxt_get_max_func_vnics(bp);
9025 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
Michael Chanae10ae72016-12-29 12:13:38 -05009026
9027 /* RSS contexts not a limiting factor */
9028 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9029 max_rss_ctxs = max_vnics;
Michael Chan8079e8f2016-12-29 12:13:37 -05009030 if (vnics > max_vnics || vnics > max_rss_ctxs) {
Michael Chan6a1eef52018-01-17 03:21:10 -05009031 if (bp->rx_nr_rings > 1)
9032 netdev_warn(bp->dev,
9033 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9034 min(max_rss_ctxs - 1, max_vnics - 1));
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009035 return false;
Vasundhara Volama2304902016-07-25 12:33:36 -04009036 }
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009037
Michael Chanf1ca94d2018-08-05 16:51:53 -04009038 if (!BNXT_NEW_RM(bp))
Michael Chan6a1eef52018-01-17 03:21:10 -05009039 return true;
9040
9041 if (vnics == bp->hw_resc.resv_vnics)
9042 return true;
9043
Vasundhara Volam780baad2018-12-16 18:46:23 -05009044 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
Michael Chan6a1eef52018-01-17 03:21:10 -05009045 if (vnics <= bp->hw_resc.resv_vnics)
9046 return true;
9047
9048 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
Vasundhara Volam780baad2018-12-16 18:46:23 -05009049 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
Michael Chan6a1eef52018-01-17 03:21:10 -05009050 return false;
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009051#else
9052 return false;
9053#endif
9054}
9055
Michael Chanc0c050c2015-10-22 16:01:17 -04009056static netdev_features_t bnxt_fix_features(struct net_device *dev,
9057 netdev_features_t features)
9058{
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009059 struct bnxt *bp = netdev_priv(dev);
9060
Vasundhara Volama2304902016-07-25 12:33:36 -04009061 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009062 features &= ~NETIF_F_NTUPLE;
Michael Chan5a9f6b22016-06-06 02:37:15 -04009063
Michael Chan1054aee2017-12-16 03:09:42 -05009064 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9065 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9066
9067 if (!(features & NETIF_F_GRO))
9068 features &= ~NETIF_F_GRO_HW;
9069
9070 if (features & NETIF_F_GRO_HW)
9071 features &= ~NETIF_F_LRO;
9072
Michael Chan5a9f6b22016-06-06 02:37:15 -04009073	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
9074 * turned on or off together.
9075 */
9076 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9077 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9078 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9079 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9080 NETIF_F_HW_VLAN_STAG_RX);
9081 else
9082 features |= NETIF_F_HW_VLAN_CTAG_RX |
9083 NETIF_F_HW_VLAN_STAG_RX;
9084 }
Michael Chancf6645f2016-06-13 02:25:28 -04009085#ifdef CONFIG_BNXT_SRIOV
9086 if (BNXT_VF(bp)) {
9087 if (bp->vf.vlan) {
9088 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9089 NETIF_F_HW_VLAN_STAG_RX);
9090 }
9091 }
9092#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04009093 return features;
9094}
9095
9096static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9097{
9098 struct bnxt *bp = netdev_priv(dev);
9099 u32 flags = bp->flags;
9100 u32 changes;
9101 int rc = 0;
9102 bool re_init = false;
9103 bool update_tpa = false;
9104
9105 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
Michael Chan1054aee2017-12-16 03:09:42 -05009106 if (features & NETIF_F_GRO_HW)
Michael Chanc0c050c2015-10-22 16:01:17 -04009107 flags |= BNXT_FLAG_GRO;
Michael Chan1054aee2017-12-16 03:09:42 -05009108 else if (features & NETIF_F_LRO)
Michael Chanc0c050c2015-10-22 16:01:17 -04009109 flags |= BNXT_FLAG_LRO;
9110
Michael Chanbdbd1eb2016-12-29 12:13:43 -05009111 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9112 flags &= ~BNXT_FLAG_TPA;
9113
Michael Chanc0c050c2015-10-22 16:01:17 -04009114 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9115 flags |= BNXT_FLAG_STRIP_VLAN;
9116
9117 if (features & NETIF_F_NTUPLE)
9118 flags |= BNXT_FLAG_RFS;
9119
9120 changes = flags ^ bp->flags;
9121 if (changes & BNXT_FLAG_TPA) {
9122 update_tpa = true;
9123 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9124 (flags & BNXT_FLAG_TPA) == 0)
9125 re_init = true;
9126 }
9127
9128 if (changes & ~BNXT_FLAG_TPA)
9129 re_init = true;
9130
9131 if (flags != bp->flags) {
9132 u32 old_flags = bp->flags;
9133
9134 bp->flags = flags;
9135
Michael Chan2bcfa6f2015-12-27 18:19:24 -05009136 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04009137 if (update_tpa)
9138 bnxt_set_ring_params(bp);
9139 return rc;
9140 }
9141
9142 if (re_init) {
9143 bnxt_close_nic(bp, false, false);
9144 if (update_tpa)
9145 bnxt_set_ring_params(bp);
9146
9147 return bnxt_open_nic(bp, false, false);
9148 }
9149 if (update_tpa) {
9150 rc = bnxt_set_tpa(bp,
9151 (flags & BNXT_FLAG_TPA) ?
9152 true : false);
9153 if (rc)
9154 bp->flags = old_flags;
9155 }
9156 }
9157 return rc;
9158}
9159
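/* Debug helper: query the firmware (HWRM_DBG_RING_INFO_GET) for the
 * current producer and consumer indices of the given ring.
 */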
Michael Chanffd77622018-11-15 03:25:40 -05009160static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9161 u32 ring_id, u32 *prod, u32 *cons)
9162{
9163 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9164 struct hwrm_dbg_ring_info_get_input req = {0};
9165 int rc;
9166
9167 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9168 req.ring_type = ring_type;
9169 req.fw_ring_id = cpu_to_le32(ring_id);
9170 mutex_lock(&bp->hwrm_cmd_lock);
9171 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9172 if (!rc) {
9173 *prod = le32_to_cpu(resp->producer_index);
9174 *cons = le32_to_cpu(resp->consumer_index);
9175 }
9176 mutex_unlock(&bp->hwrm_cmd_lock);
9177 return rc;
9178}
9179
Michael Chan9f554592016-01-02 23:44:58 -05009180static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9181{
Michael Chanb6ab4b02016-01-02 23:44:59 -05009182 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05009183 int i = bnapi->index;
9184
Michael Chan3b2b7d92016-01-02 23:45:00 -05009185 if (!txr)
9186 return;
9187
Michael Chan9f554592016-01-02 23:44:58 -05009188 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9189 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9190 txr->tx_cons);
9191}
9192
9193static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9194{
Michael Chanb6ab4b02016-01-02 23:44:59 -05009195 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05009196 int i = bnapi->index;
9197
Michael Chan3b2b7d92016-01-02 23:45:00 -05009198 if (!rxr)
9199 return;
9200
Michael Chan9f554592016-01-02 23:44:58 -05009201 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9202 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9203 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9204 rxr->rx_sw_agg_prod);
9205}
9206
9207static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9208{
9209 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9210 int i = bnapi->index;
9211
9212 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9213 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9214}
9215
Michael Chanc0c050c2015-10-22 16:01:17 -04009216static void bnxt_dbg_dump_states(struct bnxt *bp)
9217{
9218 int i;
9219 struct bnxt_napi *bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04009220
9221 for (i = 0; i < bp->cp_nr_rings; i++) {
9222 bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04009223 if (netif_msg_drv(bp)) {
Michael Chan9f554592016-01-02 23:44:58 -05009224 bnxt_dump_tx_sw_state(bnapi);
9225 bnxt_dump_rx_sw_state(bnapi);
9226 bnxt_dump_cp_sw_state(bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04009227 }
9228 }
9229}
9230
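/* Recover from an error (e.g. TX timeout) by closing and re-opening the
 * NIC.  Unless this is a silent reset, dump the ring states first and
 * stop/start the ULP (RDMA) driver around the reset.
 */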
Michael Chan6988bd92016-06-13 02:25:29 -04009231static void bnxt_reset_task(struct bnxt *bp, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04009232{
Michael Chan6988bd92016-06-13 02:25:29 -04009233 if (!silent)
9234 bnxt_dbg_dump_states(bp);
Michael Chan028de142015-12-09 19:35:44 -05009235 if (netif_running(bp->dev)) {
Michael Chanb386cd32017-03-08 18:44:33 -05009236 int rc;
9237
9238 if (!silent)
9239 bnxt_ulp_stop(bp);
Michael Chan028de142015-12-09 19:35:44 -05009240 bnxt_close_nic(bp, false, false);
Michael Chanb386cd32017-03-08 18:44:33 -05009241 rc = bnxt_open_nic(bp, false, false);
9242 if (!silent && !rc)
9243 bnxt_ulp_start(bp);
Michael Chan028de142015-12-09 19:35:44 -05009244 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009245}
9246
9247static void bnxt_tx_timeout(struct net_device *dev)
9248{
9249 struct bnxt *bp = netdev_priv(dev);
9250
9251 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9252 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04009253 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009254}
9255
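/* Periodic timer callback.  No real work is done here; it only sets the
 * appropriate sp_event bits (port stats, TC flower stats, PHY setting
 * retry, missed-IRQ check on P5 chips) and lets bnxt_sp_task() handle
 * them, then re-arms itself.
 */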
Kees Cooke99e88a2017-10-16 14:43:17 -07009256static void bnxt_timer(struct timer_list *t)
Michael Chanc0c050c2015-10-22 16:01:17 -04009257{
Kees Cooke99e88a2017-10-16 14:43:17 -07009258 struct bnxt *bp = from_timer(bp, t, timer);
Michael Chanc0c050c2015-10-22 16:01:17 -04009259 struct net_device *dev = bp->dev;
9260
9261 if (!netif_running(dev))
9262 return;
9263
9264 if (atomic_read(&bp->intr_sem) != 0)
9265 goto bnxt_restart_timer;
9266
Michael Chanadcc3312017-07-24 12:34:24 -04009267 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9268 bp->stats_coal_ticks) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05009269 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04009270 bnxt_queue_sp_work(bp);
Michael Chan3bdf56c2016-03-07 15:38:45 -05009271 }
Sathya Perla5a84acb2017-10-26 11:51:31 -04009272
9273 if (bnxt_tc_flower_enabled(bp)) {
9274 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9275 bnxt_queue_sp_work(bp);
9276 }
Michael Chana1ef4a72018-08-05 16:51:49 -04009277
9278 if (bp->link_info.phy_retry) {
9279 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9280 bp->link_info.phy_retry = 0;
9281 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9282 } else {
9283 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9284 bnxt_queue_sp_work(bp);
9285 }
9286 }
Michael Chanffd77622018-11-15 03:25:40 -05009287
9288 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9289 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9290 bnxt_queue_sp_work(bp);
9291 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009292bnxt_restart_timer:
9293 mod_timer(&bp->timer, jiffies + bp->current_interval);
9294}
9295
Michael Chana551ee92017-01-25 02:55:07 -05009296static void bnxt_rtnl_lock_sp(struct bnxt *bp)
Michael Chan6988bd92016-06-13 02:25:29 -04009297{
Michael Chana551ee92017-01-25 02:55:07 -05009298 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9299 * set. If the device is being closed, bnxt_close() may be holding
Michael Chan6988bd92016-06-13 02:25:29 -04009300	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
 9301	 * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
9302 */
9303 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9304 rtnl_lock();
Michael Chana551ee92017-01-25 02:55:07 -05009305}
9306
9307static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9308{
Michael Chan6988bd92016-06-13 02:25:29 -04009309 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9310 rtnl_unlock();
9311}
9312
Michael Chana551ee92017-01-25 02:55:07 -05009313/* Only called from bnxt_sp_task() */
9314static void bnxt_reset(struct bnxt *bp, bool silent)
9315{
9316 bnxt_rtnl_lock_sp(bp);
9317 if (test_bit(BNXT_STATE_OPEN, &bp->state))
9318 bnxt_reset_task(bp, silent);
9319 bnxt_rtnl_unlock_sp(bp);
9320}
9321
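/* On P5 (57500) chips, check for completion rings that have pending work
 * but whose consumer index has not advanced since the last timer tick.
 * Query the firmware for the ring state and count it as a missed IRQ.
 */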
Michael Chanffd77622018-11-15 03:25:40 -05009322static void bnxt_chk_missed_irq(struct bnxt *bp)
9323{
9324 int i;
9325
9326 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9327 return;
9328
9329 for (i = 0; i < bp->cp_nr_rings; i++) {
9330 struct bnxt_napi *bnapi = bp->bnapi[i];
9331 struct bnxt_cp_ring_info *cpr;
9332 u32 fw_ring_id;
9333 int j;
9334
9335 if (!bnapi)
9336 continue;
9337
9338 cpr = &bnapi->cp_ring;
9339 for (j = 0; j < 2; j++) {
9340 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9341 u32 val[2];
9342
9343 if (!cpr2 || cpr2->has_more_work ||
9344 !bnxt_has_work(bp, cpr2))
9345 continue;
9346
9347 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9348 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9349 continue;
9350 }
9351 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9352 bnxt_dbg_hwrm_ring_info_get(bp,
9353 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9354 fw_ring_id, &val[0], &val[1]);
Michael Chan83eb5c52018-11-15 03:25:41 -05009355 cpr->missed_irqs++;
Michael Chanffd77622018-11-15 03:25:40 -05009356 }
9357 }
9358}
9359
Michael Chanc0c050c2015-10-22 16:01:17 -04009360static void bnxt_cfg_ntp_filters(struct bnxt *);
9361
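/* Slow-path workqueue handler.  Each BNXT_*_SP_EVENT bit set by the fast
 * path, the timer, or ndo callbacks is processed here while the device is
 * open.  BNXT_STATE_IN_SP_TASK is held for the duration, except inside
 * bnxt_reset(), which must drop it before taking the rtnl lock.
 */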
9362static void bnxt_sp_task(struct work_struct *work)
9363{
9364 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
Michael Chanc0c050c2015-10-22 16:01:17 -04009365
Michael Chan4cebdce2015-12-09 19:35:43 -05009366 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9367 smp_mb__after_atomic();
9368 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9369 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04009370 return;
Michael Chan4cebdce2015-12-09 19:35:43 -05009371 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009372
9373 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9374 bnxt_cfg_rx_mode(bp);
9375
9376 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9377 bnxt_cfg_ntp_filters(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009378 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9379 bnxt_hwrm_exec_fwd_req(bp);
9380 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9381 bnxt_hwrm_tunnel_dst_port_alloc(
9382 bp, bp->vxlan_port,
9383 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9384 }
9385 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9386 bnxt_hwrm_tunnel_dst_port_free(
9387 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9388 }
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07009389 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9390 bnxt_hwrm_tunnel_dst_port_alloc(
9391 bp, bp->nge_port,
9392 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9393 }
9394 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9395 bnxt_hwrm_tunnel_dst_port_free(
9396 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9397 }
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04009398 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05009399 bnxt_hwrm_port_qstats(bp);
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04009400 bnxt_hwrm_port_qstats_ext(bp);
9401 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05009402
Michael Chan0eaa24b2017-01-25 02:55:08 -05009403 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
Michael Chane2dc9b62017-10-13 21:09:30 -04009404 int rc;
Michael Chan0eaa24b2017-01-25 02:55:08 -05009405
Michael Chane2dc9b62017-10-13 21:09:30 -04009406 mutex_lock(&bp->link_lock);
Michael Chan0eaa24b2017-01-25 02:55:08 -05009407 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9408 &bp->sp_event))
9409 bnxt_hwrm_phy_qcaps(bp);
9410
Michael Chane2dc9b62017-10-13 21:09:30 -04009411 rc = bnxt_update_link(bp, true);
9412 mutex_unlock(&bp->link_lock);
Michael Chan0eaa24b2017-01-25 02:55:08 -05009413 if (rc)
9414 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9415 rc);
9416 }
Michael Chana1ef4a72018-08-05 16:51:49 -04009417 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9418 int rc;
9419
9420 mutex_lock(&bp->link_lock);
9421 rc = bnxt_update_phy_setting(bp);
9422 mutex_unlock(&bp->link_lock);
9423 if (rc) {
9424 netdev_warn(bp->dev, "update phy settings retry failed\n");
9425 } else {
9426 bp->link_info.phy_retry = false;
9427 netdev_info(bp->dev, "update phy settings retry succeeded\n");
9428 }
9429 }
Michael Chan90c694b2017-01-25 02:55:09 -05009430 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
Michael Chane2dc9b62017-10-13 21:09:30 -04009431 mutex_lock(&bp->link_lock);
9432 bnxt_get_port_module_status(bp);
9433 mutex_unlock(&bp->link_lock);
Michael Chan90c694b2017-01-25 02:55:09 -05009434 }
Sathya Perla5a84acb2017-10-26 11:51:31 -04009435
9436 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9437 bnxt_tc_flow_stats_work(bp);
9438
Michael Chanffd77622018-11-15 03:25:40 -05009439 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9440 bnxt_chk_missed_irq(bp);
9441
Michael Chane2dc9b62017-10-13 21:09:30 -04009442 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
9443 * must be the last functions to be called before exiting.
9444 */
Michael Chanc0c050c2015-10-22 16:01:17 -04009445 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9446 bnxt_reset(bp, false);
9447
9448 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9449 bnxt_reset(bp, true);
9450
Michael Chanc0c050c2015-10-22 16:01:17 -04009451 smp_mb__before_atomic();
9452 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9453}
9454
Michael Chand1e79252017-02-06 16:55:38 -05009455/* Under rtnl_lock */
Michael Chan98fdbe72017-08-28 13:40:26 -04009456int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9457 int tx_xdp)
Michael Chand1e79252017-02-06 16:55:38 -05009458{
9459 int max_rx, max_tx, tx_sets = 1;
Vasundhara Volam780baad2018-12-16 18:46:23 -05009460 int tx_rings_needed, stats;
Michael Chan8f23d632018-01-17 03:21:12 -05009461 int rx_rings = rx;
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05009462 int cp, vnics, rc;
Michael Chand1e79252017-02-06 16:55:38 -05009463
Michael Chand1e79252017-02-06 16:55:38 -05009464 if (tcs)
9465 tx_sets = tcs;
9466
9467 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9468 if (rc)
9469 return rc;
9470
9471 if (max_rx < rx)
9472 return -ENOMEM;
9473
Michael Chan5f449242017-02-06 16:55:40 -05009474 tx_rings_needed = tx * tx_sets + tx_xdp;
Michael Chand1e79252017-02-06 16:55:38 -05009475 if (max_tx < tx_rings_needed)
9476 return -ENOMEM;
9477
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05009478 vnics = 1;
9479 if (bp->flags & BNXT_FLAG_RFS)
9480 vnics += rx_rings;
9481
Michael Chan8f23d632018-01-17 03:21:12 -05009482 if (bp->flags & BNXT_FLAG_AGG_RINGS)
9483 rx_rings <<= 1;
9484 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
Vasundhara Volam780baad2018-12-16 18:46:23 -05009485 stats = cp;
9486 if (BNXT_NEW_RM(bp)) {
Michael Chan11c3ec72018-04-11 11:50:17 -04009487 cp += bnxt_get_ulp_msix_num(bp);
Vasundhara Volam780baad2018-12-16 18:46:23 -05009488 stats += bnxt_get_ulp_stat_ctxs(bp);
9489 }
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05009490 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
Vasundhara Volam780baad2018-12-16 18:46:23 -05009491 stats, vnics);
Michael Chand1e79252017-02-06 16:55:38 -05009492}
9493
Sathya Perla17086392017-02-20 19:25:18 -05009494static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9495{
9496 if (bp->bar2) {
9497 pci_iounmap(pdev, bp->bar2);
9498 bp->bar2 = NULL;
9499 }
9500
9501 if (bp->bar1) {
9502 pci_iounmap(pdev, bp->bar1);
9503 bp->bar1 = NULL;
9504 }
9505
9506 if (bp->bar0) {
9507 pci_iounmap(pdev, bp->bar0);
9508 bp->bar0 = NULL;
9509 }
9510}
9511
9512static void bnxt_cleanup_pci(struct bnxt *bp)
9513{
9514 bnxt_unmap_bars(bp, bp->pdev);
9515 pci_release_regions(bp->pdev);
9516 pci_disable_device(bp->pdev);
9517}
9518
Michael Chan18775aa2017-10-26 11:51:27 -04009519static void bnxt_init_dflt_coal(struct bnxt *bp)
9520{
9521 struct bnxt_coal *coal;
9522
 9523	/* Tick values in microseconds.
9524 * 1 coal_buf x bufs_per_record = 1 completion record.
9525 */
9526 coal = &bp->rx_coal;
Michael Chan0c2ff8d2018-12-20 03:38:53 -05009527 coal->coal_ticks = 10;
Michael Chan18775aa2017-10-26 11:51:27 -04009528 coal->coal_bufs = 30;
9529 coal->coal_ticks_irq = 1;
9530 coal->coal_bufs_irq = 2;
Andy Gospodarek05abe4dd2018-04-26 17:44:38 -04009531 coal->idle_thresh = 50;
Michael Chan18775aa2017-10-26 11:51:27 -04009532 coal->bufs_per_record = 2;
9533 coal->budget = 64; /* NAPI budget */
9534
9535 coal = &bp->tx_coal;
9536 coal->coal_ticks = 28;
9537 coal->coal_bufs = 30;
9538 coal->coal_ticks_irq = 2;
9539 coal->coal_bufs_irq = 2;
9540 coal->bufs_per_record = 1;
9541
9542 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9543}
9544
Michael Chanc0c050c2015-10-22 16:01:17 -04009545static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9546{
9547 int rc;
9548 struct bnxt *bp = netdev_priv(dev);
9549
9550 SET_NETDEV_DEV(dev, &pdev->dev);
9551
9552 /* enable device (incl. PCI PM wakeup), and bus-mastering */
9553 rc = pci_enable_device(pdev);
9554 if (rc) {
9555 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9556 goto init_err;
9557 }
9558
9559 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9560 dev_err(&pdev->dev,
9561 "Cannot find PCI device base address, aborting\n");
9562 rc = -ENODEV;
9563 goto init_err_disable;
9564 }
9565
9566 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9567 if (rc) {
9568 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9569 goto init_err_disable;
9570 }
9571
9572 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9573 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9574 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
9575 goto init_err_disable;
9576 }
9577
9578 pci_set_master(pdev);
9579
9580 bp->dev = dev;
9581 bp->pdev = pdev;
9582
9583 bp->bar0 = pci_ioremap_bar(pdev, 0);
9584 if (!bp->bar0) {
9585 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9586 rc = -ENOMEM;
9587 goto init_err_release;
9588 }
9589
9590 bp->bar1 = pci_ioremap_bar(pdev, 2);
9591 if (!bp->bar1) {
9592 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9593 rc = -ENOMEM;
9594 goto init_err_release;
9595 }
9596
9597 bp->bar2 = pci_ioremap_bar(pdev, 4);
9598 if (!bp->bar2) {
9599 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9600 rc = -ENOMEM;
9601 goto init_err_release;
9602 }
9603
Satish Baddipadige6316ea62016-03-07 15:38:48 -05009604 pci_enable_pcie_error_reporting(pdev);
9605
Michael Chanc0c050c2015-10-22 16:01:17 -04009606 INIT_WORK(&bp->sp_task, bnxt_sp_task);
9607
9608 spin_lock_init(&bp->ntp_fltr_lock);
Michael Chan697197e2018-10-14 07:02:46 -04009609#if BITS_PER_LONG == 32
9610 spin_lock_init(&bp->db_lock);
9611#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04009612
9613 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9614 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9615
Michael Chan18775aa2017-10-26 11:51:27 -04009616 bnxt_init_dflt_coal(bp);
Michael Chan51f30782016-07-01 18:46:29 -04009617
Kees Cooke99e88a2017-10-16 14:43:17 -07009618 timer_setup(&bp->timer, bnxt_timer, 0);
Michael Chanc0c050c2015-10-22 16:01:17 -04009619 bp->current_interval = BNXT_TIMER_INTERVAL;
9620
Michael Chancaefe522015-12-09 19:35:42 -05009621 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04009622 return 0;
9623
9624init_err_release:
Sathya Perla17086392017-02-20 19:25:18 -05009625 bnxt_unmap_bars(bp, pdev);
Michael Chanc0c050c2015-10-22 16:01:17 -04009626 pci_release_regions(pdev);
9627
9628init_err_disable:
9629 pci_disable_device(pdev);
9630
9631init_err:
9632 return rc;
9633}
9634
9635/* rtnl_lock held */
9636static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9637{
9638 struct sockaddr *addr = p;
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05009639 struct bnxt *bp = netdev_priv(dev);
9640 int rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04009641
9642 if (!is_valid_ether_addr(addr->sa_data))
9643 return -EADDRNOTAVAIL;
9644
Michael Chanc1a7bdf2017-10-26 11:51:24 -04009645 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9646 return 0;
9647
Michael Chan28ea3342018-09-14 15:41:29 -04009648 rc = bnxt_approve_mac(bp, addr->sa_data, true);
Michael Chan84c33dd2016-04-11 04:11:13 -04009649 if (rc)
9650 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04009651
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05009652 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9653 if (netif_running(dev)) {
9654 bnxt_close_nic(bp, false, false);
9655 rc = bnxt_open_nic(bp, false, false);
9656 }
9657
9658 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04009659}
9660
9661/* rtnl_lock held */
9662static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
9663{
9664 struct bnxt *bp = netdev_priv(dev);
9665
Michael Chanc0c050c2015-10-22 16:01:17 -04009666 if (netif_running(dev))
9667 bnxt_close_nic(bp, false, false);
9668
9669 dev->mtu = new_mtu;
9670 bnxt_set_ring_params(bp);
9671
9672 if (netif_running(dev))
9673 return bnxt_open_nic(bp, false, false);
9674
9675 return 0;
9676}
9677
Michael Chanc5e3deb2016-12-02 21:17:15 -05009678int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
Michael Chanc0c050c2015-10-22 16:01:17 -04009679{
9680 struct bnxt *bp = netdev_priv(dev);
Michael Chan3ffb6a32016-11-11 00:11:42 -05009681 bool sh = false;
Michael Chand1e79252017-02-06 16:55:38 -05009682 int rc;
John Fastabend16e5cc62016-02-16 21:16:43 -08009683
Michael Chanc0c050c2015-10-22 16:01:17 -04009684 if (tc > bp->max_tc) {
Michael Chanb451c8b2017-02-12 19:18:17 -05009685 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04009686 tc, bp->max_tc);
9687 return -EINVAL;
9688 }
9689
9690 if (netdev_get_num_tc(dev) == tc)
9691 return 0;
9692
Michael Chan3ffb6a32016-11-11 00:11:42 -05009693 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9694 sh = true;
9695
Michael Chan98fdbe72017-08-28 13:40:26 -04009696 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
9697 sh, tc, bp->tx_nr_rings_xdp);
Michael Chand1e79252017-02-06 16:55:38 -05009698 if (rc)
9699 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04009700
9701 /* Needs to close the device and do hw resource re-allocations */
9702 if (netif_running(bp->dev))
9703 bnxt_close_nic(bp, true, false);
9704
9705 if (tc) {
9706 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
9707 netdev_set_num_tc(dev, tc);
9708 } else {
9709 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9710 netdev_reset_tc(dev);
9711 }
Michael Chan87e9b372017-08-23 19:34:03 -04009712 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
Michael Chan3ffb6a32016-11-11 00:11:42 -05009713 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9714 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04009715
9716 if (netif_running(bp->dev))
9717 return bnxt_open_nic(bp, true, false);
9718
9719 return 0;
9720}
9721
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009722static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9723 void *cb_priv)
Sathya Perla2ae74082017-08-28 13:40:33 -04009724{
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009725 struct bnxt *bp = cb_priv;
Sathya Perla2ae74082017-08-28 13:40:33 -04009726
Jakub Kicinski312324f2018-01-25 14:00:48 -08009727 if (!bnxt_tc_flower_enabled(bp) ||
9728 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
Sathya Perla2ae74082017-08-28 13:40:33 -04009729 return -EOPNOTSUPP;
9730
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009731 switch (type) {
9732 case TC_SETUP_CLSFLOWER:
9733 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
9734 default:
9735 return -EOPNOTSUPP;
9736 }
9737}
9738
9739static int bnxt_setup_tc_block(struct net_device *dev,
9740 struct tc_block_offload *f)
9741{
9742 struct bnxt *bp = netdev_priv(dev);
9743
9744 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9745 return -EOPNOTSUPP;
9746
9747 switch (f->command) {
9748 case TC_BLOCK_BIND:
9749 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
John Hurley60513bd2018-06-25 14:30:04 -07009750 bp, bp, f->extack);
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009751 case TC_BLOCK_UNBIND:
9752 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
9753 return 0;
9754 default:
9755 return -EOPNOTSUPP;
9756 }
Sathya Perla2ae74082017-08-28 13:40:33 -04009757}
9758
Jiri Pirko2572ac52017-08-07 10:15:17 +02009759static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02009760 void *type_data)
Michael Chanc5e3deb2016-12-02 21:17:15 -05009761{
Sathya Perla2ae74082017-08-28 13:40:33 -04009762 switch (type) {
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009763 case TC_SETUP_BLOCK:
9764 return bnxt_setup_tc_block(dev, type_data);
Nogah Frankel575ed7d2017-11-06 07:23:42 +01009765 case TC_SETUP_QDISC_MQPRIO: {
Sathya Perla2ae74082017-08-28 13:40:33 -04009766 struct tc_mqprio_qopt *mqprio = type_data;
Jiri Pirkode4784c2017-08-07 10:15:32 +02009767
Sathya Perla2ae74082017-08-28 13:40:33 -04009768 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9769
9770 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
9771 }
9772 default:
Jiri Pirko38cf0422017-08-07 10:15:31 +02009773 return -EOPNOTSUPP;
Sathya Perla2ae74082017-08-28 13:40:33 -04009774 }
Michael Chanc5e3deb2016-12-02 21:17:15 -05009775}
9776
Michael Chanc0c050c2015-10-22 16:01:17 -04009777#ifdef CONFIG_RFS_ACCEL
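/* Two ntuple filters match if their dissected flow keys (addresses, ports,
 * protocols, control flags) and source/destination MAC addresses are all
 * equal.
 */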
9778static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
9779 struct bnxt_ntuple_filter *f2)
9780{
9781 struct flow_keys *keys1 = &f1->fkeys;
9782 struct flow_keys *keys2 = &f2->fkeys;
9783
9784 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
9785 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
9786 keys1->ports.ports == keys2->ports.ports &&
9787 keys1->basic.ip_proto == keys2->basic.ip_proto &&
9788 keys1->basic.n_proto == keys2->basic.n_proto &&
Michael Chan61aad722017-02-12 19:18:14 -05009789 keys1->control.flags == keys2->control.flags &&
Michael Chana54c4d72016-07-25 12:33:35 -04009790 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
9791 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
Michael Chanc0c050c2015-10-22 16:01:17 -04009792 return true;
9793
9794 return false;
9795}
9796
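/* ndo_rx_flow_steer() handler for accelerated RFS.  Dissect the flow,
 * drop duplicates already present in the hash table, assign a software
 * filter ID from the bitmap, and defer the HWRM ntuple filter programming
 * to bnxt_cfg_ntp_filters() via the slow-path task.
 */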
9797static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
9798 u16 rxq_index, u32 flow_id)
9799{
9800 struct bnxt *bp = netdev_priv(dev);
9801 struct bnxt_ntuple_filter *fltr, *new_fltr;
9802 struct flow_keys *fkeys;
9803 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
Michael Chana54c4d72016-07-25 12:33:35 -04009804 int rc = 0, idx, bit_id, l2_idx = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04009805 struct hlist_head *head;
9806
Michael Chana54c4d72016-07-25 12:33:35 -04009807 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
9808 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9809 int off = 0, j;
9810
9811 netif_addr_lock_bh(dev);
9812 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
9813 if (ether_addr_equal(eth->h_dest,
9814 vnic->uc_list + off)) {
9815 l2_idx = j + 1;
9816 break;
9817 }
9818 }
9819 netif_addr_unlock_bh(dev);
9820 if (!l2_idx)
9821 return -EINVAL;
9822 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009823 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
9824 if (!new_fltr)
9825 return -ENOMEM;
9826
9827 fkeys = &new_fltr->fkeys;
9828 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
9829 rc = -EPROTONOSUPPORT;
9830 goto err_free;
9831 }
9832
Michael Chandda0e742016-12-29 12:13:40 -05009833 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
9834 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
Michael Chanc0c050c2015-10-22 16:01:17 -04009835 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
9836 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
9837 rc = -EPROTONOSUPPORT;
9838 goto err_free;
9839 }
Michael Chandda0e742016-12-29 12:13:40 -05009840 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
9841 bp->hwrm_spec_code < 0x10601) {
9842 rc = -EPROTONOSUPPORT;
9843 goto err_free;
9844 }
Michael Chan61aad722017-02-12 19:18:14 -05009845 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
9846 bp->hwrm_spec_code < 0x10601) {
9847 rc = -EPROTONOSUPPORT;
9848 goto err_free;
9849 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009850
Michael Chana54c4d72016-07-25 12:33:35 -04009851 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04009852 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
9853
9854 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
9855 head = &bp->ntp_fltr_hash_tbl[idx];
9856 rcu_read_lock();
9857 hlist_for_each_entry_rcu(fltr, head, hash) {
9858 if (bnxt_fltr_match(fltr, new_fltr)) {
9859 rcu_read_unlock();
9860 rc = 0;
9861 goto err_free;
9862 }
9863 }
9864 rcu_read_unlock();
9865
9866 spin_lock_bh(&bp->ntp_fltr_lock);
Michael Chan84e86b92015-11-05 16:25:50 -05009867 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
9868 BNXT_NTP_FLTR_MAX_FLTR, 0);
9869 if (bit_id < 0) {
Michael Chanc0c050c2015-10-22 16:01:17 -04009870 spin_unlock_bh(&bp->ntp_fltr_lock);
9871 rc = -ENOMEM;
9872 goto err_free;
9873 }
9874
Michael Chan84e86b92015-11-05 16:25:50 -05009875 new_fltr->sw_id = (u16)bit_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04009876 new_fltr->flow_id = flow_id;
Michael Chana54c4d72016-07-25 12:33:35 -04009877 new_fltr->l2_fltr_idx = l2_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04009878 new_fltr->rxq = rxq_index;
9879 hlist_add_head_rcu(&new_fltr->hash, head);
9880 bp->ntp_fltr_count++;
9881 spin_unlock_bh(&bp->ntp_fltr_lock);
9882
9883 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04009884 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009885
9886 return new_fltr->sw_id;
9887
9888err_free:
9889 kfree(new_fltr);
9890 return rc;
9891}
9892
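/* Walk the ntuple filter hash table: program filters that have not been
 * set up in hardware yet, and free the ones that RPS reports as expired.
 */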
9893static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9894{
9895 int i;
9896
9897 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
9898 struct hlist_head *head;
9899 struct hlist_node *tmp;
9900 struct bnxt_ntuple_filter *fltr;
9901 int rc;
9902
9903 head = &bp->ntp_fltr_hash_tbl[i];
9904 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
9905 bool del = false;
9906
9907 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
9908 if (rps_may_expire_flow(bp->dev, fltr->rxq,
9909 fltr->flow_id,
9910 fltr->sw_id)) {
9911 bnxt_hwrm_cfa_ntuple_filter_free(bp,
9912 fltr);
9913 del = true;
9914 }
9915 } else {
9916 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
9917 fltr);
9918 if (rc)
9919 del = true;
9920 else
9921 set_bit(BNXT_FLTR_VALID, &fltr->state);
9922 }
9923
9924 if (del) {
9925 spin_lock_bh(&bp->ntp_fltr_lock);
9926 hlist_del_rcu(&fltr->hash);
9927 bp->ntp_fltr_count--;
9928 spin_unlock_bh(&bp->ntp_fltr_lock);
9929 synchronize_rcu();
9930 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
9931 kfree(fltr);
9932 }
9933 }
9934 }
Jeffrey Huang19241362016-02-26 04:00:00 -05009935 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
 9936		netdev_info(bp->dev, "Received PF driver unload event!\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04009937}
9938
9939#else
9940
9941static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9942{
9943}
9944
9945#endif /* CONFIG_RFS_ACCEL */
9946
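/* udp_tunnel add callback.  Track the VXLAN and GENEVE destination port
 * reference counts; the HWRM tunnel dst_port allocation is performed from
 * the slow-path task when the first port of a given type is added.
 */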
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009947static void bnxt_udp_tunnel_add(struct net_device *dev,
9948 struct udp_tunnel_info *ti)
Michael Chanc0c050c2015-10-22 16:01:17 -04009949{
9950 struct bnxt *bp = netdev_priv(dev);
9951
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009952 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9953 return;
9954
Michael Chanc0c050c2015-10-22 16:01:17 -04009955 if (!netif_running(dev))
9956 return;
9957
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009958 switch (ti->type) {
9959 case UDP_TUNNEL_TYPE_VXLAN:
9960 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
9961 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04009962
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009963 bp->vxlan_port_cnt++;
9964 if (bp->vxlan_port_cnt == 1) {
9965 bp->vxlan_port = ti->port;
9966 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04009967 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009968 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009969 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07009970 case UDP_TUNNEL_TYPE_GENEVE:
9971 if (bp->nge_port_cnt && bp->nge_port != ti->port)
9972 return;
9973
9974 bp->nge_port_cnt++;
9975 if (bp->nge_port_cnt == 1) {
9976 bp->nge_port = ti->port;
9977 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
9978 }
9979 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009980 default:
9981 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04009982 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009983
Michael Chanc213eae2017-10-13 21:09:29 -04009984 bnxt_queue_sp_work(bp);
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009985}
9986
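/* ndo_udp_tunnel_del handler: drop the VXLAN/GENEVE port reference and,
 * once the last user is gone, schedule sp_work to free the tunnel port
 * in the firmware.
 */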
9987static void bnxt_udp_tunnel_del(struct net_device *dev,
9988 struct udp_tunnel_info *ti)
9989{
9990 struct bnxt *bp = netdev_priv(dev);
9991
9992 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9993 return;
9994
9995 if (!netif_running(dev))
9996 return;
9997
9998 switch (ti->type) {
9999 case UDP_TUNNEL_TYPE_VXLAN:
10000 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
10001 return;
10002 bp->vxlan_port_cnt--;
10003
10004 if (bp->vxlan_port_cnt != 0)
10005 return;
10006
10007 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
10008 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -070010009 case UDP_TUNNEL_TYPE_GENEVE:
10010 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
10011 return;
10012 bp->nge_port_cnt--;
10013
10014 if (bp->nge_port_cnt != 0)
10015 return;
10016
10017 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
10018 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -070010019 default:
10020 return;
10021 }
10022
Michael Chanc213eae2017-10-13 21:09:29 -040010023 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010024}
10025
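/* Report the current bridge mode (VEB/VEPA) through the generic
 * ndo_dflt_bridge_getlink() helper.
 */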
Michael Chan39d8ba22017-07-24 12:34:22 -040010026static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10027 struct net_device *dev, u32 filter_mask,
10028 int nlflags)
10029{
10030 struct bnxt *bp = netdev_priv(dev);
10031
10032 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
10033 nlflags, filter_mask, NULL);
10034}
10035
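/* Parse IFLA_BRIDGE_MODE from the netlink message and ask the firmware
 * to switch the bridge mode. Supported only on a single-function PF
 * with new enough firmware.
 */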
10036static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
Petr Machata2fd527b2018-12-12 17:02:48 +000010037 u16 flags, struct netlink_ext_ack *extack)
Michael Chan39d8ba22017-07-24 12:34:22 -040010038{
10039 struct bnxt *bp = netdev_priv(dev);
10040 struct nlattr *attr, *br_spec;
10041 int rem, rc = 0;
10042
10043 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
10044 return -EOPNOTSUPP;
10045
10046 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10047 if (!br_spec)
10048 return -EINVAL;
10049
10050 nla_for_each_nested(attr, br_spec, rem) {
10051 u16 mode;
10052
10053 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10054 continue;
10055
10056 if (nla_len(attr) < sizeof(mode))
10057 return -EINVAL;
10058
10059 mode = nla_get_u16(attr);
10060 if (mode == bp->br_mode)
10061 break;
10062
10063 rc = bnxt_hwrm_set_br_mode(bp, mode);
10064 if (!rc)
10065 bp->br_mode = mode;
10066 break;
10067 }
10068 return rc;
10069}
10070
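/* ndo_get_phys_port_name: PF ports are reported as "p<port_id>". */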
Sathya Perlac124a622017-07-24 12:34:29 -040010071static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
10072 size_t len)
10073{
10074 struct bnxt *bp = netdev_priv(dev);
10075 int rc;
10076
10077 /* The PF and its VF-reps only support the switchdev framework */
10078 if (!BNXT_PF(bp))
10079 return -EOPNOTSUPP;
10080
Sathya Perla53f70b82017-07-25 13:28:41 -040010081 rc = snprintf(buf, len, "p%d", bp->pf.port_id);
Sathya Perlac124a622017-07-24 12:34:29 -040010082
10083 if (rc >= len)
10084 return -EOPNOTSUPP;
10085 return 0;
10086}
10087
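/* Return the switch ID identifying the physical switch; only valid
 * when the PF is in switchdev mode.
 */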
Florian Fainelli52d52542019-02-06 09:45:36 -080010088int bnxt_get_port_parent_id(struct net_device *dev,
10089 struct netdev_phys_item_id *ppid)
Sathya Perlac124a622017-07-24 12:34:29 -040010090{
Florian Fainelli52d52542019-02-06 09:45:36 -080010091 struct bnxt *bp = netdev_priv(dev);
10092
Sathya Perlac124a622017-07-24 12:34:29 -040010093 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
10094 return -EOPNOTSUPP;
10095
10096 /* The PF and it's VF-reps only support the switchdev framework */
10097 if (!BNXT_PF(bp))
10098 return -EOPNOTSUPP;
10099
Florian Fainelli52d52542019-02-06 09:45:36 -080010100 ppid->id_len = sizeof(bp->switch_id);
10101 memcpy(ppid->id, bp->switch_id, ppid->id_len);
10102
Sathya Perlac124a622017-07-24 12:34:29 -040010103 return 0;
10104}
10105
Michael Chanc0c050c2015-10-22 16:01:17 -040010106static const struct net_device_ops bnxt_netdev_ops = {
10107 .ndo_open = bnxt_open,
10108 .ndo_start_xmit = bnxt_start_xmit,
10109 .ndo_stop = bnxt_close,
10110 .ndo_get_stats64 = bnxt_get_stats64,
10111 .ndo_set_rx_mode = bnxt_set_rx_mode,
10112 .ndo_do_ioctl = bnxt_ioctl,
10113 .ndo_validate_addr = eth_validate_addr,
10114 .ndo_set_mac_address = bnxt_change_mac_addr,
10115 .ndo_change_mtu = bnxt_change_mtu,
10116 .ndo_fix_features = bnxt_fix_features,
10117 .ndo_set_features = bnxt_set_features,
10118 .ndo_tx_timeout = bnxt_tx_timeout,
10119#ifdef CONFIG_BNXT_SRIOV
10120 .ndo_get_vf_config = bnxt_get_vf_config,
10121 .ndo_set_vf_mac = bnxt_set_vf_mac,
10122 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
10123 .ndo_set_vf_rate = bnxt_set_vf_bw,
10124 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
10125 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
Vasundhara Volam746df132018-03-31 13:54:10 -040010126 .ndo_set_vf_trust = bnxt_set_vf_trust,
Michael Chanc0c050c2015-10-22 16:01:17 -040010127#endif
Michael Chanc0c050c2015-10-22 16:01:17 -040010128 .ndo_setup_tc = bnxt_setup_tc,
10129#ifdef CONFIG_RFS_ACCEL
10130 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
10131#endif
Alexander Duyckad51b8e2016-06-16 12:21:19 -070010132 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
10133 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
Jakub Kicinskif4e63522017-11-03 13:56:16 -070010134 .ndo_bpf = bnxt_xdp,
Michael Chan39d8ba22017-07-24 12:34:22 -040010135 .ndo_bridge_getlink = bnxt_bridge_getlink,
10136 .ndo_bridge_setlink = bnxt_bridge_setlink,
Florian Fainelli52d52542019-02-06 09:45:36 -080010137 .ndo_get_port_parent_id = bnxt_get_port_parent_id,
Sathya Perlac124a622017-07-24 12:34:29 -040010138 .ndo_get_phys_port_name = bnxt_get_phys_port_name
Michael Chanc0c050c2015-10-22 16:01:17 -040010139};
10140
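/* PCI remove callback: disable SR-IOV, unregister the netdev and
 * devlink instance, cancel deferred work, and release the firmware
 * and PCI resources acquired in bnxt_init_one().
 */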
10141static void bnxt_remove_one(struct pci_dev *pdev)
10142{
10143 struct net_device *dev = pci_get_drvdata(pdev);
10144 struct bnxt *bp = netdev_priv(dev);
10145
Sathya Perla4ab0c6a2017-07-24 12:34:27 -040010146 if (BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -040010147 bnxt_sriov_disable(bp);
Sathya Perla4ab0c6a2017-07-24 12:34:27 -040010148 bnxt_dl_unregister(bp);
10149 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010150
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010151 pci_disable_pcie_error_reporting(pdev);
Michael Chanc0c050c2015-10-22 16:01:17 -040010152 unregister_netdev(dev);
Sathya Perla2ae74082017-08-28 13:40:33 -040010153 bnxt_shutdown_tc(bp);
Michael Chanc213eae2017-10-13 21:09:29 -040010154 bnxt_cancel_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010155 bp->sp_event = 0;
10156
Michael Chan78095922016-12-07 00:26:16 -050010157 bnxt_clear_int_mode(bp);
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -050010158 bnxt_hwrm_func_drv_unrgtr(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010159 bnxt_free_hwrm_resources(bp);
Deepak Khungare605db82017-05-29 19:06:04 -040010160 bnxt_free_hwrm_short_cmd_req(bp);
Michael Chaneb513652017-04-04 18:14:12 -040010161 bnxt_ethtool_free(bp);
Michael Chan7df4ae92016-12-02 21:17:17 -050010162 bnxt_dcb_free(bp);
Michael Chana588e452016-12-07 00:26:21 -050010163 kfree(bp->edev);
10164 bp->edev = NULL;
Michael Chan98f04cf2018-10-14 07:02:43 -040010165 bnxt_free_ctx_mem(bp);
10166 kfree(bp->ctx);
10167 bp->ctx = NULL;
Sathya Perla17086392017-02-20 19:25:18 -050010168 bnxt_cleanup_pci(bp);
Michael Chanfd3ab1c2018-12-16 18:46:30 -050010169 bnxt_free_port_stats(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010170 free_netdev(dev);
Michael Chanc0c050c2015-10-22 16:01:17 -040010171}
10172
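/* Query PHY capabilities and the current link state from the firmware
 * and seed the initial autoneg, speed and flow-control settings.
 */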
10173static int bnxt_probe_phy(struct bnxt *bp)
10174{
10175 int rc = 0;
10176 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chanc0c050c2015-10-22 16:01:17 -040010177
Michael Chan170ce012016-04-05 14:08:57 -040010178 rc = bnxt_hwrm_phy_qcaps(bp);
10179 if (rc) {
10180 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10181 rc);
10182 return rc;
10183 }
Michael Chane2dc9b62017-10-13 21:09:30 -040010184 mutex_init(&bp->link_lock);
Michael Chan170ce012016-04-05 14:08:57 -040010185
Michael Chanc0c050c2015-10-22 16:01:17 -040010186 rc = bnxt_update_link(bp, false);
10187 if (rc) {
10188 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10189 rc);
10190 return rc;
10191 }
10192
Michael Chan93ed8112016-06-13 02:25:37 -040010193 /* Older firmware does not have supported_auto_speeds, so assume
10194 * that all supported speeds can be autonegotiated.
10195 */
10196 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10197 link_info->support_auto_speeds = link_info->support_speeds;
10198
Michael Chanc0c050c2015-10-22 16:01:17 -040010199 /* Initialize the ethtool settings copy with NVM settings */
Michael Chan0d8abf02016-02-10 17:33:47 -050010200 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
Michael Chanc9ee9512016-04-05 14:08:56 -040010201 link_info->autoneg = BNXT_AUTONEG_SPEED;
10202 if (bp->hwrm_spec_code >= 0x10201) {
10203 if (link_info->auto_pause_setting &
10204 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10205 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10206 } else {
10207 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10208 }
Michael Chan0d8abf02016-02-10 17:33:47 -050010209 link_info->advertising = link_info->auto_link_speeds;
Michael Chan0d8abf02016-02-10 17:33:47 -050010210 } else {
10211 link_info->req_link_speed = link_info->force_link_speed;
10212 link_info->req_duplex = link_info->duplex_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -040010213 }
Michael Chanc9ee9512016-04-05 14:08:56 -040010214 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10215 link_info->req_flow_ctrl =
10216 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10217 else
10218 link_info->req_flow_ctrl = link_info->force_pause_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -040010219 return rc;
10220}
10221
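/* Return the MSI-X table size from PCI config space, or 1 if the
 * device has no MSI-X capability.
 */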
10222static int bnxt_get_max_irq(struct pci_dev *pdev)
10223{
10224 u16 ctrl;
10225
10226 if (!pdev->msix_cap)
10227 return 1;
10228
10229 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
10230 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
10231}
10232
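/* Compute the maximum RX/TX/completion rings usable by this function
 * from the firmware-reported resources, accounting for aggregation
 * rings, ULP usage and chip-specific quirks.
 */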
Michael Chan6e6c5a52016-01-02 23:45:02 -050010233static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10234 int *max_cp)
Michael Chanc0c050c2015-10-22 16:01:17 -040010235{
Michael Chan6a4f2942018-01-17 03:21:06 -050010236 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
Michael Chane30fbc32018-12-09 07:01:02 -050010237 int max_ring_grps = 0, max_irq;
Michael Chanc0c050c2015-10-22 16:01:17 -040010238
Michael Chan6a4f2942018-01-17 03:21:06 -050010239 *max_tx = hw_resc->max_tx_rings;
10240 *max_rx = hw_resc->max_rx_rings;
Michael Chane30fbc32018-12-09 07:01:02 -050010241 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
10242 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
10243 bnxt_get_ulp_msix_num(bp),
Vasundhara Volamc027c6b2018-12-16 18:46:21 -050010244 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
Michael Chane30fbc32018-12-09 07:01:02 -050010245 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10246 *max_cp = min_t(int, *max_cp, max_irq);
Michael Chan6a4f2942018-01-17 03:21:06 -050010247 max_ring_grps = hw_resc->max_hw_ring_grps;
Prashant Sreedharan76595192016-07-18 07:15:22 -040010248 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
10249 *max_cp -= 1;
10250 *max_rx -= 2;
10251 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010252 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10253 *max_rx >>= 1;
Michael Chane30fbc32018-12-09 07:01:02 -050010254 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10255 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
10256 /* On P5 chips, max_cp output param should be available NQs */
10257 *max_cp = max_irq;
10258 }
Michael Chanb72d4a62015-12-27 18:19:27 -050010259 *max_rx = min_t(int, *max_rx, max_ring_grps);
Michael Chan6e6c5a52016-01-02 23:45:02 -050010260}
10261
10262int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
10263{
10264 int rx, tx, cp;
10265
10266 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
Michael Chan78f058a2018-07-09 02:24:49 -040010267 *max_rx = rx;
10268 *max_tx = tx;
Michael Chan6e6c5a52016-01-02 23:45:02 -050010269 if (!rx || !tx || !cp)
10270 return -ENOMEM;
10271
Michael Chan6e6c5a52016-01-02 23:45:02 -050010272 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
10273}
10274
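/* Like bnxt_get_max_rings(), but fall back to disabling aggregation
 * rings when resources are tight and set aside minimum resources for
 * RoCE when that capability is present.
 */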
Michael Chane4060d32016-12-07 00:26:19 -050010275static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10276 bool shared)
10277{
10278 int rc;
10279
10280 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010281 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
10282 /* Not enough rings, try disabling agg rings. */
10283 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
10284 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
Michael Chan07f4fde2018-07-09 02:24:48 -040010285 if (rc) {
10286 /* set BNXT_FLAG_AGG_RINGS back for consistency */
10287 bp->flags |= BNXT_FLAG_AGG_RINGS;
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010288 return rc;
Michael Chan07f4fde2018-07-09 02:24:48 -040010289 }
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010290 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
Michael Chan1054aee2017-12-16 03:09:42 -050010291 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10292 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010293 bnxt_set_ring_params(bp);
10294 }
Michael Chane4060d32016-12-07 00:26:19 -050010295
10296 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10297 int max_cp, max_stat, max_irq;
10298
10299 /* Reserve minimum resources for RoCE */
10300 max_cp = bnxt_get_max_func_cp_rings(bp);
10301 max_stat = bnxt_get_max_func_stat_ctxs(bp);
10302 max_irq = bnxt_get_max_func_irqs(bp);
10303 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10304 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10305 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10306 return 0;
10307
10308 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10309 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10310 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10311 max_cp = min_t(int, max_cp, max_irq);
10312 max_cp = min_t(int, max_cp, max_stat);
10313 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10314 if (rc)
10315 rc = 0;
10316 }
10317 return rc;
10318}
10319
Michael Chan58ea8012018-01-17 03:21:08 -050010320/* In the initial default shared ring setting, each shared ring must have a
10321 * RX/TX ring pair.
10322 */
10323static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10324{
10325 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10326 bp->rx_nr_rings = bp->cp_nr_rings;
10327 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10328 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10329}
10330
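/* Choose the default RX/TX/completion ring counts based on CPU and
 * port counts, then reserve them with the firmware, trimming the
 * counts if the reservation comes back smaller.
 */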
Michael Chan702c2212017-05-29 19:06:10 -040010331static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
Michael Chan6e6c5a52016-01-02 23:45:02 -050010332{
10333 int dflt_rings, max_rx_rings, max_tx_rings, rc;
Michael Chan6e6c5a52016-01-02 23:45:02 -050010334
Michael Chan2773dfb2018-04-26 17:44:42 -040010335 if (!bnxt_can_reserve_rings(bp))
10336 return 0;
10337
Michael Chan6e6c5a52016-01-02 23:45:02 -050010338 if (sh)
10339 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10340 dflt_rings = netif_get_num_default_rss_queues();
Michael Chan1d3ef132018-03-31 13:54:07 -040010341 /* Reduce default rings on multi-port cards so that total default
10342 * rings do not exceed CPU count.
10343 */
10344 if (bp->port_count > 1) {
10345 int max_rings =
10346 max_t(int, num_online_cpus() / bp->port_count, 1);
10347
10348 dflt_rings = min_t(int, dflt_rings, max_rings);
10349 }
Michael Chane4060d32016-12-07 00:26:19 -050010350 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
Michael Chan6e6c5a52016-01-02 23:45:02 -050010351 if (rc)
10352 return rc;
10353 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10354 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
Michael Chan58ea8012018-01-17 03:21:08 -050010355 if (sh)
10356 bnxt_trim_dflt_sh_rings(bp);
10357 else
10358 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10359 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
Michael Chan391be5c2016-12-29 12:13:41 -050010360
Michael Chan674f50a2018-01-17 03:21:09 -050010361 rc = __bnxt_reserve_rings(bp);
Michael Chan391be5c2016-12-29 12:13:41 -050010362 if (rc)
10363 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
Michael Chan58ea8012018-01-17 03:21:08 -050010364 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10365 if (sh)
10366 bnxt_trim_dflt_sh_rings(bp);
Michael Chan391be5c2016-12-29 12:13:41 -050010367
Michael Chan674f50a2018-01-17 03:21:09 -050010368 /* Rings may have been trimmed, re-reserve the trimmed rings. */
10369 if (bnxt_need_reserve_rings(bp)) {
10370 rc = __bnxt_reserve_rings(bp);
10371 if (rc)
10372 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
10373 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10374 }
Prashant Sreedharan76595192016-07-18 07:15:22 -040010375 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10376 bp->rx_nr_rings++;
10377 bp->cp_nr_rings++;
10378 }
Michael Chan6e6c5a52016-01-02 23:45:02 -050010379 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -040010380}
10381
Michael Chan47558ac2018-04-26 17:44:44 -040010382static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10383{
10384 int rc;
10385
10386 if (bp->tx_nr_rings)
10387 return 0;
10388
Michael Chan6b95c3e2018-09-03 04:23:17 -040010389 bnxt_ulp_irq_stop(bp);
10390 bnxt_clear_int_mode(bp);
Michael Chan47558ac2018-04-26 17:44:44 -040010391 rc = bnxt_set_dflt_rings(bp, true);
10392 if (rc) {
10393 netdev_err(bp->dev, "Not enough rings available.\n");
Michael Chan6b95c3e2018-09-03 04:23:17 -040010394 goto init_dflt_ring_err;
Michael Chan47558ac2018-04-26 17:44:44 -040010395 }
10396 rc = bnxt_init_int_mode(bp);
10397 if (rc)
Michael Chan6b95c3e2018-09-03 04:23:17 -040010398 goto init_dflt_ring_err;
10399
Michael Chan47558ac2018-04-26 17:44:44 -040010400 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10401 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10402 bp->flags |= BNXT_FLAG_RFS;
10403 bp->dev->features |= NETIF_F_NTUPLE;
10404 }
Michael Chan6b95c3e2018-09-03 04:23:17 -040010405init_dflt_ring_err:
10406 bnxt_ulp_irq_restart(bp, rc);
10407 return rc;
Michael Chan47558ac2018-04-26 17:44:44 -040010408}
10409
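/* Re-query function resources and re-initialize the interrupt mode,
 * e.g. after an SR-IOV configuration change, closing and reopening the
 * NIC around the transition if it is running.
 */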
Michael Chan80fcaf42018-01-17 03:21:05 -050010410int bnxt_restore_pf_fw_resources(struct bnxt *bp)
Michael Chan7b08f662016-12-07 00:26:18 -050010411{
Michael Chan80fcaf42018-01-17 03:21:05 -050010412 int rc;
10413
Michael Chan7b08f662016-12-07 00:26:18 -050010414 ASSERT_RTNL();
10415 bnxt_hwrm_func_qcaps(bp);
Venkat Duvvuru1a037782018-03-09 23:46:09 -050010416
10417 if (netif_running(bp->dev))
10418 __bnxt_close_nic(bp, true, false);
10419
Michael Chanec86f142018-03-31 13:54:21 -040010420 bnxt_ulp_irq_stop(bp);
Michael Chan80fcaf42018-01-17 03:21:05 -050010421 bnxt_clear_int_mode(bp);
10422 rc = bnxt_init_int_mode(bp);
Michael Chanec86f142018-03-31 13:54:21 -040010423 bnxt_ulp_irq_restart(bp, rc);
Venkat Duvvuru1a037782018-03-09 23:46:09 -050010424
10425 if (netif_running(bp->dev)) {
10426 if (rc)
10427 dev_close(bp->dev);
10428 else
10429 rc = bnxt_open_nic(bp, true, false);
10430 }
10431
Michael Chan80fcaf42018-01-17 03:21:05 -050010432 return rc;
Michael Chan7b08f662016-12-07 00:26:18 -050010433}
10434
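/* Set the netdev MAC address: the PF uses the MAC supplied by the
 * firmware, while a VF uses the admin-assigned MAC or a random one
 * that must then be approved by the PF.
 */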
Michael Chana22a6ac2017-08-23 19:34:05 -040010435static int bnxt_init_mac_addr(struct bnxt *bp)
10436{
10437 int rc = 0;
10438
10439 if (BNXT_PF(bp)) {
10440 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10441 } else {
10442#ifdef CONFIG_BNXT_SRIOV
10443 struct bnxt_vf_info *vf = &bp->vf;
Michael Chan28ea3342018-09-14 15:41:29 -040010444 bool strict_approval = true;
Michael Chana22a6ac2017-08-23 19:34:05 -040010445
10446 if (is_valid_ether_addr(vf->mac_addr)) {
Vasundhara Volam91cdda42018-01-17 03:21:14 -050010447 /* overwrite netdev dev_addr with admin VF MAC */
Michael Chana22a6ac2017-08-23 19:34:05 -040010448 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
Michael Chan28ea3342018-09-14 15:41:29 -040010449 /* Older PF driver or firmware may not approve this
10450 * correctly.
10451 */
10452 strict_approval = false;
Michael Chana22a6ac2017-08-23 19:34:05 -040010453 } else {
10454 eth_hw_addr_random(bp->dev);
Michael Chana22a6ac2017-08-23 19:34:05 -040010455 }
Michael Chan28ea3342018-09-14 15:41:29 -040010456 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
Michael Chana22a6ac2017-08-23 19:34:05 -040010457#endif
10458 }
10459 return rc;
10460}
10461
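/* PCI probe callback: allocate the netdev, establish firmware
 * communication, discover device capabilities, set up default rings
 * and features, and register the net device (and devlink on the PF).
 */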
Michael Chanc0c050c2015-10-22 16:01:17 -040010462static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10463{
10464 static int version_printed;
10465 struct net_device *dev;
10466 struct bnxt *bp;
Michael Chan6e6c5a52016-01-02 23:45:02 -050010467 int rc, max_irqs;
Michael Chanc0c050c2015-10-22 16:01:17 -040010468
Ray Jui4e003382017-02-20 19:25:16 -050010469 if (pci_is_bridge(pdev))
Prashant Sreedharanfa853dd2016-07-18 07:15:25 -040010470 return -ENODEV;
10471
Michael Chanc0c050c2015-10-22 16:01:17 -040010472 if (version_printed++ == 0)
10473 pr_info("%s", version);
10474
10475 max_irqs = bnxt_get_max_irq(pdev);
10476 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10477 if (!dev)
10478 return -ENOMEM;
10479
10480 bp = netdev_priv(dev);
Michael Chan9c1fabd2018-10-14 07:02:47 -040010481 bnxt_set_max_func_irqs(bp, max_irqs);
Michael Chanc0c050c2015-10-22 16:01:17 -040010482
10483 if (bnxt_vf_pciid(ent->driver_data))
10484 bp->flags |= BNXT_FLAG_VF;
10485
Michael Chan2bcfa6f2015-12-27 18:19:24 -050010486 if (pdev->msix_cap)
Michael Chanc0c050c2015-10-22 16:01:17 -040010487 bp->flags |= BNXT_FLAG_MSIX_CAP;
Michael Chanc0c050c2015-10-22 16:01:17 -040010488
10489 rc = bnxt_init_board(pdev, dev);
10490 if (rc < 0)
10491 goto init_err_free;
10492
10493 dev->netdev_ops = &bnxt_netdev_ops;
10494 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10495 dev->ethtool_ops = &bnxt_ethtool_ops;
Michael Chanc0c050c2015-10-22 16:01:17 -040010496 pci_set_drvdata(pdev, dev);
10497
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010498 rc = bnxt_alloc_hwrm_resources(bp);
10499 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010500 goto init_err_pci_clean;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010501
10502 mutex_init(&bp->hwrm_cmd_lock);
10503 rc = bnxt_hwrm_ver_get(bp);
10504 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010505 goto init_err_pci_clean;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010506
Venkat Duvvuru760b6d32018-12-20 03:38:48 -050010507 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10508 rc = bnxt_alloc_kong_hwrm_resources(bp);
10509 if (rc)
10510 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10511 }
10512
Michael Chan1dfddc42018-10-14 07:02:39 -040010513 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10514 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
Deepak Khungare605db82017-05-29 19:06:04 -040010515 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10516 if (rc)
10517 goto init_err_pci_clean;
10518 }
10519
Michael Chane38287b2018-10-14 07:02:45 -040010520 if (BNXT_CHIP_P5(bp))
10521 bp->flags |= BNXT_FLAG_CHIP_P5;
10522
Michael Chan3c2217a2017-03-08 18:44:32 -050010523 rc = bnxt_hwrm_func_reset(bp);
10524 if (rc)
10525 goto init_err_pci_clean;
10526
Rob Swindell5ac67d82016-09-19 03:58:03 -040010527 bnxt_hwrm_fw_set_time(bp);
10528
Michael Chanc0c050c2015-10-22 16:01:17 -040010529 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10530 NETIF_F_TSO | NETIF_F_TSO6 |
10531 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Tom Herbert7e133182016-05-18 09:06:10 -070010532 NETIF_F_GSO_IPXIP4 |
Alexander Duyck152971e2016-05-02 09:38:55 -070010533 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10534 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010535 NETIF_F_RXCSUM | NETIF_F_GRO;
10536
Michael Chane38287b2018-10-14 07:02:45 -040010537 if (BNXT_SUPPORTS_TPA(bp))
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010538 dev->hw_features |= NETIF_F_LRO;
Michael Chanc0c050c2015-10-22 16:01:17 -040010539
Michael Chanc0c050c2015-10-22 16:01:17 -040010540 dev->hw_enc_features =
10541 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10542 NETIF_F_TSO | NETIF_F_TSO6 |
10543 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Alexander Duyck152971e2016-05-02 09:38:55 -070010544 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -070010545 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
Alexander Duyck152971e2016-05-02 09:38:55 -070010546 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10547 NETIF_F_GSO_GRE_CSUM;
Michael Chanc0c050c2015-10-22 16:01:17 -040010548 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10549 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10550 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
Michael Chane38287b2018-10-14 07:02:45 -040010551 if (BNXT_SUPPORTS_TPA(bp))
Michael Chan1054aee2017-12-16 03:09:42 -050010552 dev->hw_features |= NETIF_F_GRO_HW;
Michael Chanc0c050c2015-10-22 16:01:17 -040010553 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
Michael Chan1054aee2017-12-16 03:09:42 -050010554 if (dev->features & NETIF_F_GRO_HW)
10555 dev->features &= ~NETIF_F_LRO;
Michael Chanc0c050c2015-10-22 16:01:17 -040010556 dev->priv_flags |= IFF_UNICAST_FLT;
10557
10558#ifdef CONFIG_BNXT_SRIOV
10559 init_waitqueue_head(&bp->sriov_cfg_wait);
Sathya Perla4ab0c6a2017-07-24 12:34:27 -040010560 mutex_init(&bp->sriov_lock);
Michael Chanc0c050c2015-10-22 16:01:17 -040010561#endif
Michael Chane38287b2018-10-14 07:02:45 -040010562 if (BNXT_SUPPORTS_TPA(bp)) {
10563 bp->gro_func = bnxt_gro_func_5730x;
10564 if (BNXT_CHIP_P4(bp))
10565 bp->gro_func = bnxt_gro_func_5731x;
10566 }
10567 if (!BNXT_CHIP_P4_PLUS(bp))
Michael Chan434c9752017-05-29 19:06:08 -040010568 bp->flags |= BNXT_FLAG_DOUBLE_DB;
Michael Chan309369c2016-06-13 02:25:34 -040010569
Michael Chanc0c050c2015-10-22 16:01:17 -040010570 rc = bnxt_hwrm_func_drv_rgtr(bp);
10571 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010572 goto init_err_pci_clean;
Michael Chanc0c050c2015-10-22 16:01:17 -040010573
Michael Chana1653b12016-12-07 00:26:20 -050010574 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10575 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010576 goto init_err_pci_clean;
Michael Chana1653b12016-12-07 00:26:20 -050010577
Michael Chana588e452016-12-07 00:26:21 -050010578 bp->ulp_probe = bnxt_ulp_probe;
10579
Michael Chan98f04cf2018-10-14 07:02:43 -040010580 rc = bnxt_hwrm_queue_qportcfg(bp);
10581 if (rc) {
10582 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10583 rc);
10584 rc = -1;
10585 goto init_err_pci_clean;
10586 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010587 /* Get the MAX capabilities for this function */
10588 rc = bnxt_hwrm_func_qcaps(bp);
10589 if (rc) {
10590 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10591 rc);
10592 rc = -1;
Sathya Perla17086392017-02-20 19:25:18 -050010593 goto init_err_pci_clean;
Michael Chanc0c050c2015-10-22 16:01:17 -040010594 }
Michael Chana22a6ac2017-08-23 19:34:05 -040010595 rc = bnxt_init_mac_addr(bp);
10596 if (rc) {
10597 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10598 rc = -EADDRNOTAVAIL;
10599 goto init_err_pci_clean;
10600 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010601
Satish Baddipadige567b2ab2016-06-13 02:25:31 -040010602 bnxt_hwrm_func_qcfg(bp);
Michael Chan6ba99032018-11-15 03:25:37 -050010603 bnxt_hwrm_vnic_qcaps(bp);
Michael Chan5ad2cbe2017-01-13 01:32:03 -050010604 bnxt_hwrm_port_led_qcaps(bp);
Michael Chaneb513652017-04-04 18:14:12 -040010605 bnxt_ethtool_init(bp);
Michael Chan87fe6032017-05-16 16:39:43 -040010606 bnxt_dcb_init(bp);
Satish Baddipadige567b2ab2016-06-13 02:25:31 -040010607
Michael Chan7eb9bb32017-10-26 11:51:25 -040010608 /* MTU range: 60 - FW defined max */
10609 dev->min_mtu = ETH_ZLEN;
10610 dev->max_mtu = bp->max_mtu;
10611
Michael Chand5430d32017-08-28 13:40:31 -040010612 rc = bnxt_probe_phy(bp);
10613 if (rc)
10614 goto init_err_pci_clean;
10615
Michael Chanc61fb992017-02-06 16:55:36 -050010616 bnxt_set_rx_skb_mode(bp, false);
Michael Chanc0c050c2015-10-22 16:01:17 -040010617 bnxt_set_tpa_flags(bp);
10618 bnxt_set_ring_params(bp);
Michael Chan702c2212017-05-29 19:06:10 -040010619 rc = bnxt_set_dflt_rings(bp, true);
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010620 if (rc) {
10621 netdev_err(bp->dev, "Not enough rings available.\n");
10622 rc = -ENOMEM;
Sathya Perla17086392017-02-20 19:25:18 -050010623 goto init_err_pci_clean;
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010624 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010625
Michael Chan87da7f72016-11-16 21:13:09 -050010626 /* Default RSS hash cfg. */
10627 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10628 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10629 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10630 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
Michael Chane38287b2018-10-14 07:02:45 -040010631 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
Michael Chan87da7f72016-11-16 21:13:09 -050010632 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10633 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10634 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10635 }
10636
Michael Chan8079e8f2016-12-29 12:13:37 -050010637 if (bnxt_rfs_supported(bp)) {
Michael Chan2bcfa6f2015-12-27 18:19:24 -050010638 dev->hw_features |= NETIF_F_NTUPLE;
10639 if (bnxt_rfs_capable(bp)) {
10640 bp->flags |= BNXT_FLAG_RFS;
10641 dev->features |= NETIF_F_NTUPLE;
10642 }
10643 }
10644
Michael Chanc0c050c2015-10-22 16:01:17 -040010645 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
10646 bp->flags |= BNXT_FLAG_STRIP_VLAN;
10647
Michael Chan78095922016-12-07 00:26:16 -050010648 rc = bnxt_init_int_mode(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010649 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010650 goto init_err_pci_clean;
Michael Chanc0c050c2015-10-22 16:01:17 -040010651
Michael Chan832aed12018-03-09 23:46:07 -050010652 /* No TC has been set yet and rings may have been trimmed due to
10653 * limited MSIX, so we re-initialize the TX rings per TC.
10654 */
10655 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10656
Michael Chanc1ef1462017-04-04 18:14:07 -040010657 bnxt_get_wol_settings(bp);
Michael Chand196ece2017-04-04 18:14:08 -040010658 if (bp->flags & BNXT_FLAG_WOL_CAP)
10659 device_set_wakeup_enable(&pdev->dev, bp->wol);
10660 else
10661 device_set_wakeup_capable(&pdev->dev, false);
Michael Chanc1ef1462017-04-04 18:14:07 -040010662
Michael Chanc3480a62018-01-17 03:21:15 -050010663 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10664
Michael Chan74706af2018-10-14 07:02:40 -040010665 bnxt_hwrm_coal_params_qcaps(bp);
10666
Michael Chanc213eae2017-10-13 21:09:29 -040010667 if (BNXT_PF(bp)) {
10668 if (!bnxt_pf_wq) {
10669 bnxt_pf_wq =
10670 create_singlethread_workqueue("bnxt_pf_wq");
10671 if (!bnxt_pf_wq) {
10672 dev_err(&pdev->dev, "Unable to create workqueue.\n");
10673 goto init_err_pci_clean;
10674 }
10675 }
Sathya Perla2ae74082017-08-28 13:40:33 -040010676 bnxt_init_tc(bp);
Michael Chanc213eae2017-10-13 21:09:29 -040010677 }
Sathya Perla2ae74082017-08-28 13:40:33 -040010678
Michael Chan78095922016-12-07 00:26:16 -050010679 rc = register_netdev(dev);
10680 if (rc)
Sathya Perla2ae74082017-08-28 13:40:33 -040010681 goto init_err_cleanup_tc;
Michael Chan78095922016-12-07 00:26:16 -050010682
Sathya Perla4ab0c6a2017-07-24 12:34:27 -040010683 if (BNXT_PF(bp))
10684 bnxt_dl_register(bp);
10685
Michael Chanc0c050c2015-10-22 16:01:17 -040010686 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
10687 board_info[ent->driver_data].name,
10688 (long)pci_resource_start(pdev, 0), dev->dev_addr);
Bjorn Helgaasaf125b72018-03-30 14:09:54 -050010689 pcie_print_link_status(pdev);
Ajit Khaparde90c4f782016-05-15 03:04:45 -040010690
Michael Chanc0c050c2015-10-22 16:01:17 -040010691 return 0;
10692
Sathya Perla2ae74082017-08-28 13:40:33 -040010693init_err_cleanup_tc:
10694 bnxt_shutdown_tc(bp);
Michael Chan78095922016-12-07 00:26:16 -050010695 bnxt_clear_int_mode(bp);
10696
Sathya Perla17086392017-02-20 19:25:18 -050010697init_err_pci_clean:
Vasundhara Volamf9099d62019-04-25 22:31:51 -040010698 bnxt_free_hwrm_short_cmd_req(bp);
Venkat Duvvurua2bf74f2018-10-05 00:26:02 -040010699 bnxt_free_hwrm_resources(bp);
Michael Chan98f04cf2018-10-14 07:02:43 -040010700 bnxt_free_ctx_mem(bp);
10701 kfree(bp->ctx);
10702 bp->ctx = NULL;
Sathya Perla17086392017-02-20 19:25:18 -050010703 bnxt_cleanup_pci(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010704
10705init_err_free:
10706 free_netdev(dev);
10707 return rc;
10708}
10709
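/* PCI shutdown callback: close the device and, on power-off, arm
 * wake-on-LAN if configured and move the device to D3hot.
 */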
Michael Chand196ece2017-04-04 18:14:08 -040010710static void bnxt_shutdown(struct pci_dev *pdev)
10711{
10712 struct net_device *dev = pci_get_drvdata(pdev);
10713 struct bnxt *bp;
10714
10715 if (!dev)
10716 return;
10717
10718 rtnl_lock();
10719 bp = netdev_priv(dev);
10720 if (!bp)
10721 goto shutdown_exit;
10722
10723 if (netif_running(dev))
10724 dev_close(dev);
10725
Ray Juia7f3f932017-12-01 03:13:02 -050010726 bnxt_ulp_shutdown(bp);
10727
Michael Chand196ece2017-04-04 18:14:08 -040010728 if (system_state == SYSTEM_POWER_OFF) {
10729 bnxt_clear_int_mode(bp);
10730 pci_wake_from_d3(pdev, bp->wol);
10731 pci_set_power_state(pdev, PCI_D3hot);
10732 }
10733
10734shutdown_exit:
10735 rtnl_unlock();
10736}
10737
Michael Chanf65a2042017-04-04 18:14:11 -040010738#ifdef CONFIG_PM_SLEEP
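/* Suspend closes the NIC and unregisters the driver from the firmware;
 * resume re-establishes firmware communication, resets the function
 * and reopens the NIC.
 */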
10739static int bnxt_suspend(struct device *device)
10740{
10741 struct pci_dev *pdev = to_pci_dev(device);
10742 struct net_device *dev = pci_get_drvdata(pdev);
10743 struct bnxt *bp = netdev_priv(dev);
10744 int rc = 0;
10745
10746 rtnl_lock();
10747 if (netif_running(dev)) {
10748 netif_device_detach(dev);
10749 rc = bnxt_close(dev);
10750 }
10751 bnxt_hwrm_func_drv_unrgtr(bp);
10752 rtnl_unlock();
10753 return rc;
10754}
10755
10756static int bnxt_resume(struct device *device)
10757{
10758 struct pci_dev *pdev = to_pci_dev(device);
10759 struct net_device *dev = pci_get_drvdata(pdev);
10760 struct bnxt *bp = netdev_priv(dev);
10761 int rc = 0;
10762
10763 rtnl_lock();
10764 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
10765 rc = -ENODEV;
10766 goto resume_exit;
10767 }
10768 rc = bnxt_hwrm_func_reset(bp);
10769 if (rc) {
10770 rc = -EBUSY;
10771 goto resume_exit;
10772 }
10773 bnxt_get_wol_settings(bp);
10774 if (netif_running(dev)) {
10775 rc = bnxt_open(dev);
10776 if (!rc)
10777 netif_device_attach(dev);
10778 }
10779
10780resume_exit:
10781 rtnl_unlock();
10782 return rc;
10783}
10784
10785static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
10786#define BNXT_PM_OPS (&bnxt_pm_ops)
10787
10788#else
10789
10790#define BNXT_PM_OPS NULL
10791
10792#endif /* CONFIG_PM_SLEEP */
10793
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010794/**
10795 * bnxt_io_error_detected - called when PCI error is detected
10796 * @pdev: Pointer to PCI device
10797 * @state: The current pci connection state
10798 *
10799 * This function is called after a PCI bus error affecting
10800 * this device has been detected.
10801 */
10802static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
10803 pci_channel_state_t state)
10804{
10805 struct net_device *netdev = pci_get_drvdata(pdev);
Michael Chana588e452016-12-07 00:26:21 -050010806 struct bnxt *bp = netdev_priv(netdev);
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010807
10808 netdev_info(netdev, "PCI I/O error detected\n");
10809
10810 rtnl_lock();
10811 netif_device_detach(netdev);
10812
Michael Chana588e452016-12-07 00:26:21 -050010813 bnxt_ulp_stop(bp);
10814
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010815 if (state == pci_channel_io_perm_failure) {
10816 rtnl_unlock();
10817 return PCI_ERS_RESULT_DISCONNECT;
10818 }
10819
10820 if (netif_running(netdev))
10821 bnxt_close(netdev);
10822
10823 pci_disable_device(pdev);
10824 rtnl_unlock();
10825
10826 /* Request a slot reset. */
10827 return PCI_ERS_RESULT_NEED_RESET;
10828}
10829
10830/**
10831 * bnxt_io_slot_reset - called after the pci bus has been reset.
10832 * @pdev: Pointer to PCI device
10833 *
10834 * Restart the card from scratch, as if from a cold-boot.
10835 * At this point, the card has experienced a hard reset,
10836 * followed by fixups by BIOS, and has its config space
10837 * set up identically to what it was at cold boot.
10838 */
10839static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
10840{
10841 struct net_device *netdev = pci_get_drvdata(pdev);
10842 struct bnxt *bp = netdev_priv(netdev);
10843 int err = 0;
10844 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
10845
10846 netdev_info(bp->dev, "PCI Slot Reset\n");
10847
10848 rtnl_lock();
10849
10850 if (pci_enable_device(pdev)) {
10851 dev_err(&pdev->dev,
10852 "Cannot re-enable PCI device after reset.\n");
10853 } else {
10854 pci_set_master(pdev);
10855
Michael Chanaa8ed022016-12-07 00:26:17 -050010856 err = bnxt_hwrm_func_reset(bp);
10857 if (!err && netif_running(netdev))
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010858 err = bnxt_open(netdev);
10859
Michael Chana588e452016-12-07 00:26:21 -050010860 if (!err) {
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010861 result = PCI_ERS_RESULT_RECOVERED;
Michael Chana588e452016-12-07 00:26:21 -050010862 bnxt_ulp_start(bp);
10863 }
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010864 }
10865
10866 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
10867 dev_close(netdev);
10868
10869 rtnl_unlock();
10870
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010871 return PCI_ERS_RESULT_RECOVERED;
10872}
10873
10874/**
10875 * bnxt_io_resume - called when traffic can start flowing again.
10876 * @pdev: Pointer to PCI device
10877 *
10878 * This callback is called when the error recovery driver tells
10879 * us that it's OK to resume normal operation.
10880 */
10881static void bnxt_io_resume(struct pci_dev *pdev)
10882{
10883 struct net_device *netdev = pci_get_drvdata(pdev);
10884
10885 rtnl_lock();
10886
10887 netif_device_attach(netdev);
10888
10889 rtnl_unlock();
10890}
10891
10892static const struct pci_error_handlers bnxt_err_handler = {
10893 .error_detected = bnxt_io_error_detected,
10894 .slot_reset = bnxt_io_slot_reset,
10895 .resume = bnxt_io_resume
10896};
10897
Michael Chanc0c050c2015-10-22 16:01:17 -040010898static struct pci_driver bnxt_pci_driver = {
10899 .name = DRV_MODULE_NAME,
10900 .id_table = bnxt_pci_tbl,
10901 .probe = bnxt_init_one,
10902 .remove = bnxt_remove_one,
Michael Chand196ece2017-04-04 18:14:08 -040010903 .shutdown = bnxt_shutdown,
Michael Chanf65a2042017-04-04 18:14:11 -040010904 .driver.pm = BNXT_PM_OPS,
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010905 .err_handler = &bnxt_err_handler,
Michael Chanc0c050c2015-10-22 16:01:17 -040010906#if defined(CONFIG_BNXT_SRIOV)
10907 .sriov_configure = bnxt_sriov_configure,
10908#endif
10909};
10910
Michael Chanc213eae2017-10-13 21:09:29 -040010911static int __init bnxt_init(void)
10912{
Andy Gospodarekcabfb092018-04-26 17:44:40 -040010913 bnxt_debug_init();
Michael Chanc213eae2017-10-13 21:09:29 -040010914 return pci_register_driver(&bnxt_pci_driver);
10915}
10916
10917static void __exit bnxt_exit(void)
10918{
10919 pci_unregister_driver(&bnxt_pci_driver);
10920 if (bnxt_pf_wq)
10921 destroy_workqueue(bnxt_pf_wq);
Andy Gospodarekcabfb092018-04-26 17:44:40 -040010922 bnxt_debug_exit();
Michael Chanc213eae2017-10-13 21:09:29 -040010923}
10924
10925module_init(bnxt_init);
10926module_exit(bnxt_exit);